gramschmidt.c

/* POLYBENCH/GPU-OPENMP
*
* This file is a part of the Polybench/GPU-OpenMP suite
*
* Contact:
* William Killian <killian@udel.edu>
*
* Copyright 2013, The University of Delaware
*/
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
/* Include polybench common header. */
#include <polybench.h>
/* Include benchmark-specific header. */
/* Default data type is double, default size is 512. */
#include "gramschmidt.h"
/* Array initialization. */
static
void init_array(int ni, int nj,
DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj),
DATA_TYPE POLYBENCH_2D(R,NJ,NJ,nj,nj),
DATA_TYPE POLYBENCH_2D(Q,NI,NJ,ni,nj))
{
int i, j;
for (i = 0; i < ni; i++)
for (j = 0; j < nj; j++) {
A[i][j] = ((DATA_TYPE) i*j) / ni;
Q[i][j] = ((DATA_TYPE) i*(j+1)) / nj;
}
for (i = 0; i < nj; i++)
for (j = 0; j < nj; j++)
R[i][j] = ((DATA_TYPE) i*(j+2)) / nj;
}
/* DCE code. Must scan the entire live-out data.
Can be used also to check the correctness of the output. */
static
void print_array(int ni, int nj,
DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj),
DATA_TYPE POLYBENCH_2D(R,NJ,NJ,nj,nj),
DATA_TYPE POLYBENCH_2D(Q,NI,NJ,ni,nj))
{
int i, j;
for (i = 0; i < ni; i++)
for (j = 0; j < nj; j++) {
fprintf (stderr, DATA_PRINTF_MODIFIER, A[i][j]);
if (i % 20 == 0) fprintf (stderr, "\n");
}
fprintf (stderr, "\n");
for (i = 0; i < nj; i++)
for (j = 0; j < nj; j++) {
fprintf (stderr, DATA_PRINTF_MODIFIER, R[i][j]);
if (i % 20 == 0) fprintf (stderr, "\n");
}
fprintf (stderr, "\n");
for (i = 0; i < ni; i++)
for (j = 0; j < nj; j++) {
fprintf (stderr, DATA_PRINTF_MODIFIER, Q[i][j]);
if (i % 20 == 0) fprintf (stderr, "\n");
}
fprintf (stderr, "\n");
}
/* Main computational kernel. The whole function will be timed,
including the call and return. */
static
void kernel_gramschmidt(int ni, int nj,
DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj),
DATA_TYPE POLYBENCH_2D(R,NJ,NJ,nj,nj),
DATA_TYPE POLYBENCH_2D(Q,NI,NJ,ni,nj))
{
int i, j, k;
DATA_TYPE nrm;
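/* Classical Gram-Schmidt QR factorization: A (ni x nj) = Q * R, with Q
orthonormal by columns and R upper triangular. For each column k,
R[k][k] is the norm of A[:,k], Q[:,k] = A[:,k] / R[k][k], and the
remaining columns j > k are orthogonalized against Q[:,k]. */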
#pragma scop
/* nrm is per-column scratch and must be privatized; sharing it across
threads is a data race. Note that the k-loop still carries a true
dependence (iteration k+1 reads columns of A updated in iteration k),
as in the original suite. */
#pragma omp parallel for private (i, j, nrm)
for (k = 0; k < _PB_NJ; k++)
{
nrm = 0;
for (i = 0; i < _PB_NI; i++)
nrm += A[i][k] * A[i][k];
R[k][k] = sqrt(nrm);
for (i = 0; i < _PB_NI; i++)
Q[i][k] = A[i][k] / R[k][k];
for (j = k + 1; j < _PB_NJ; j++)
{
R[k][j] = 0;
for (i = 0; i < _PB_NI; i++)
R[k][j] += Q[i][k] * A[i][j];
for (i = 0; i < _PB_NI; i++)
A[i][j] = A[i][j] - Q[i][k] * R[k][j];
}
}
#pragma endscop
}
int main(int argc, char** argv)
{
/* Retrieve problem size. */
int ni = NI;
int nj = NJ;
/* Variable declaration/allocation. */
POLYBENCH_2D_ARRAY_DECL(A,DATA_TYPE,NI,NJ,ni,nj);
POLYBENCH_2D_ARRAY_DECL(R,DATA_TYPE,NJ,NJ,nj,nj);
POLYBENCH_2D_ARRAY_DECL(Q,DATA_TYPE,NI,NJ,ni,nj);
/* Initialize array(s). */
init_array (ni, nj,
POLYBENCH_ARRAY(A),
POLYBENCH_ARRAY(R),
POLYBENCH_ARRAY(Q));
/* Start timer. */
polybench_start_instruments;
/* Run kernel. */
kernel_gramschmidt (ni, nj,
POLYBENCH_ARRAY(A),
POLYBENCH_ARRAY(R),
POLYBENCH_ARRAY(Q));
/* Stop and print timer. */
polybench_stop_instruments;
polybench_print_instruments;
/* Prevent dead-code elimination. All live-out data must be printed
by the function call in argument. */
polybench_prevent_dce(print_array(ni, nj, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(R), POLYBENCH_ARRAY(Q)));
/* Be clean. */
POLYBENCH_FREE_ARRAY(A);
POLYBENCH_FREE_ARRAY(R);
POLYBENCH_FREE_ARRAY(Q);
return 0;
}
3d7pt.lbpar.c

#include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Order-1, 3D 7 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
/* Perform the carry for the later subtraction by updating y. */
if (x->tv_usec < y->tv_usec)
{
int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
y->tv_usec -= 1000000 * nsec;
y->tv_sec += nsec;
}
if (x->tv_usec - y->tv_usec > 1000000)
{
int nsec = (x->tv_usec - y->tv_usec) / 1000000;
y->tv_usec += 1000000 * nsec;
y->tv_sec -= nsec;
}
/* Compute the time remaining to wait.
* tv_usec is certainly positive.
*/
result->tv_sec = x->tv_sec - y->tv_sec;
result->tv_usec = x->tv_usec - y->tv_usec;
/* Return 1 if result is negative. */
return x->tv_sec < y->tv_sec;
}
int main(int argc, char *argv[])
{
int t, i, j, k, test;
int Nx = 0, Ny = 0, Nz = 0, Nt = 0;
if (argc > 4) {
// +2 accounts for one halo cell on each side of the domain
Nx = atoi(argv[1])+2;
Ny = atoi(argv[2])+2;
Nz = atoi(argv[3])+2;
Nt = atoi(argv[4]);
} else {
fprintf(stderr, "Usage: %s Nx Ny Nz Nt\n", argv[0]);
return EXIT_FAILURE;
}
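// A holds two time planes (ping-pong buffering): the stencil reads
// plane t%2 and writes plane (t+1)%2.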
double ****A = (double ****) malloc(sizeof(double***)*2);
A[0] = (double ***) malloc(sizeof(double**)*Nz);
A[1] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[0][i] = (double**) malloc(sizeof(double*)*Ny);
A[1][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
// tile size list; an extra sentinel element marks its end
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 8;
tile_size[1] = 8;
tile_size[2] = 32;
tile_size[3] = 256;
tile_size[4] = -1;
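// 8 time steps x 8 (z) x 32 (y) x 256 (x) points per tile; these extents
// are baked into the generated loop nest below, and -1 ends the list.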
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
const double alpha = 0.0876;
const double beta = 0.0765;
// initialize variables
//
srand(42);
// initialize both time planes over the full domain (including the halo),
// so the stencil never reads uninitialized memory
for (i = 0; i < Nz; i++) {
for (j = 0; j < Ny; j++) {
for (k = 0; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
A[1][i][j][k] = 0.0;
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
int t1, t2, t3, t4, t5, t6, t7, t8;
int lb, ub, lbp, ubp, lb2, ub2;
register int lbv, ubv;
/* Start of CLooG code */
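// Auto-generated, time-skewed tiled loop nest: t1..t4 walk tiles, t5 is
// the time step, and t6..t8 carry skewed z/y/x coordinates (the physical
// indices are recovered as -t5+t6, -t5+t7, -t5+t8).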
if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) {
for (t1=-1;t1<=floord(Nt-2,4);t1++) {
lbp=max(ceild(t1,2),ceild(8*t1-Nt+3,8));
ubp=min(floord(Nt+Nz-4,8),floord(4*t1+Nz+1,8));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
for (t2=lbp;t2<=ubp;t2++) {
for (t3=max(max(0,ceild(t1-7,8)),ceild(8*t2-Nz-28,32));t3<=min(min(min(floord(Nt+Ny-4,32),floord(4*t1+Ny+5,32)),floord(8*t2+Ny+4,32)),floord(8*t1-8*t2+Nz+Ny+3,32));t3++) {
for (t4=max(max(max(0,ceild(t1-63,64)),ceild(8*t2-Nz-252,256)),ceild(32*t3-Ny-252,256));t4<=min(min(min(min(floord(Nt+Nx-4,256),floord(4*t1+Nx+5,256)),floord(8*t2+Nx+4,256)),floord(32*t3+Nx+28,256)),floord(8*t1-8*t2+Nz+Nx+3,256));t4++) {
for (t5=max(max(max(max(max(0,4*t1),8*t1-8*t2+1),8*t2-Nz+2),32*t3-Ny+2),256*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,4*t1+7),8*t2+6),32*t3+30),256*t4+254),8*t1-8*t2+Nz+5);t5++) {
for (t6=max(max(8*t2,t5+1),-8*t1+8*t2+2*t5-7);t6<=min(min(8*t2+7,-8*t1+8*t2+2*t5),t5+Nz-2);t6++) {
for (t7=max(32*t3,t5+1);t7<=min(32*t3+31,t5+Ny-2);t7++) {
lbv=max(256*t4,t5+1);
ubv=min(256*t4+255,t5+Nx-2);
#pragma ivdep
#pragma vector always
for (t8=lbv;t8<=ubv;t8++) {
A[(t5+1)%2][-t5+t6][-t5+t7][-t5+t8] =
  (alpha * A[t5%2][-t5+t6][-t5+t7][-t5+t8])
  + (beta * (A[t5%2][-t5+t6-1][-t5+t7][-t5+t8]
           + A[t5%2][-t5+t6][-t5+t7-1][-t5+t8]
           + A[t5%2][-t5+t6][-t5+t7][-t5+t8-1]
           + A[t5%2][-t5+t6+1][-t5+t7][-t5+t8]
           + A[t5%2][-t5+t6][-t5+t7+1][-t5+t8]
           + A[t5%2][-t5+t6][-t5+t7][-t5+t8+1]));
}
}
}
}
}
}
}
}
}
/* End of CLooG code */
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = min(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(1, "constant")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Freeing the allocated arrays is commented out below (it caused
// performance degradation in timing runs).
/* for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
}
free(A[0][i]);
free(A[1][i]);
}
free(A[0]);
free(A[1]);
*/
return 0;
}
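// Example invocation (binary name assumed):
//   ./3d7pt.lbpar 512 512 512 100
// i.e. a 512^3 interior grid plus a one-cell halo, 100 time steps.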
mkl_convolution-inl.h

/*******************************************************************************
* Copyright 2016 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* \file mkl_convolution-inl.h
* \brief
* \author lingyan.guo@intel.com
* zhenlin.luo@intel.com
*
*******************************************************************************/
#ifndef MXNET_OPERATOR_MKL_MKL_CONVOLUTION_INL_H_
#define MXNET_OPERATOR_MKL_MKL_CONVOLUTION_INL_H_
#include <mxnet/storage.h>
#include <dmlc/logging.h>
#include <dmlc/parameter.h>
#include <mxnet/operator.h>
#include <algorithm>
#include <map>
#include <vector>
#include <string>
#include <utility>
#include "../operator_common.h"
#include "./mkl_util-inl.h"
namespace mxnet {
namespace op {
template<typename xpu, typename DType>
class MKLConvolutionOp : public Operator {
public:
static std::string getName() {
return "MKLConvolutionOp";
}
void SetupBuffer() {
convolutionBwdBias = static_cast<dnnPrimitive_t>(NULL);
convolutionBwdFilter = static_cast<dnnPrimitive_t>(NULL);
convolutionBwdData = static_cast<dnnPrimitive_t>(NULL);
convolutionFwd = static_cast<dnnPrimitive_t>(NULL);
fwd_bottom_data = MKLData<DType>::create();
fwd_top_data = MKLData<DType>::create();
fwd_filter_data = MKLData<DType>::create();
fwd_bias_data = MKLData<DType>::create();
bwdd_top_diff = MKLData<DType>::create();
bwdd_bottom_diff = MKLData<DType>::create();
bwdd_filter_data = MKLData<DType>::create();
bwdf_top_diff = MKLData<DType>::create();
bwdf_filter_diff = MKLData<DType>::create();
bwdf_bottom_data = MKLData<DType>::create();
bwdb_top_diff = MKLData<DType>::create();
bwdb_bias_diff = MKLData<DType>::create();
// Names are for debugging purposes only.
fwd_bottom_data->name = "fwd_bottom_data @ " + this->getName();
fwd_top_data->name = "fwd_top_data @ " + this->getName();
fwd_filter_data->name = "fwd_filter_data @ " + this->getName();
fwd_bias_data->name = "fwd_bias_data @ " + this->getName();
bwdd_top_diff->name = "bwdd_top_diff @ " + this->getName();
bwdd_bottom_diff->name = "bwdd_bottom_diff @ " + this->getName();
bwdd_filter_data->name = "bwdd_filter_data @ " + this->getName();
bwdf_top_diff->name = "bwdf_top_diff @ " + this->getName();
bwdf_bottom_data->name = "bwdf_bottom_data @ " + this->getName();
bwdf_filter_diff->name = "bwdf_filter_diff @ " + this->getName();
bwdb_top_diff->name = "bwdb_top_diff @ " + this->getName();
bwdb_bias_diff->name = "bwdb_bias_diff @ " + this->getName();
}
explicit MKLConvolutionOp(ConvolutionParam p):
convolutionFwd(NULL),
convolutionBwdData(static_cast<dnnPrimitive_t>(NULL)),
convolutionBwdFilter(static_cast<dnnPrimitive_t>(NULL)),
convolutionBwdBias(static_cast<dnnPrimitive_t>(NULL)) {
this->param_ = p;
init_mkldnn_ = false;
// convert MBytes first to Bytes and then to elements.
param_.workspace = (param_.workspace << 20) / sizeof(DType);
SetupBuffer();
}
void ReleaseBuffer() {
if (convolutionFwd != NULL) {
dnnDelete<DType>(convolutionFwd);
convolutionFwd = NULL;
}
if (convolutionBwdData != NULL) {
dnnDelete<DType>(convolutionBwdData);
convolutionBwdData = NULL;
}
if (convolutionBwdFilter != NULL) {
dnnDelete<DType>(convolutionBwdFilter);
convolutionBwdFilter = NULL;
}
if (!param_.no_bias && convolutionBwdBias != NULL) {
dnnDelete<DType>(convolutionBwdBias);
convolutionBwdBias = NULL;
}
}
virtual ~MKLConvolutionOp() {
ReleaseBuffer();
}
private:
void LayerSetUp(const mshadow::Tensor<xpu, 4, DType> &data,
const mshadow::Tensor<xpu, 4, DType> &out) {
this->width_ = data.shape_[3];
this->height_ = data.shape_[2];
this->channels_ = data.shape_[1];
this->num_ = data.shape_[0];
this->group_ = param_.num_group;
this->width_out_ = out.shape_[3];
this->height_out_ = out.shape_[2];
int channel_out_ = out.shape_[1];
this->num_output_ = channel_out_;
kernel_w_ = param_.kernel[1];
kernel_h_ = param_.kernel[0];
stride_w_ = param_.stride[1];
stride_h_ = param_.stride[0];
pad_w_ = param_.pad[1];
pad_h_ = param_.pad[0];
int status;
size_t n, g;
size_t iw, ih, ic;
size_t ow, oh, oc;
size_t kw, kh;
size_t dimension = 4;
g = std::max(this->group_, 1);
n = this->num_;
iw = this->width_;
ih = this->height_;
ic = this->channels_;
ow = this->width_out_;
oh = this->height_out_;
oc = this->num_output_;
kw = this->kernel_w_;
kh = this->kernel_h_;
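// MKL dnn layouts are described by per-dimension sizes and strides with
// the innermost (width) dimension first; e.g. a dense NCHW buffer has
// strides { 1, iw, iw*ih, iw*ih*ic }.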
size_t bdata_sizes[4] = { iw, ih, ic, n };
size_t bdata_strides[4] = { 1, iw, iw*ih, iw*ih*ic };
/* starting with MKL 2017 Gold in case of groups filter layout
* becomes 5D, i.e. groups become a separate dimension */
size_t g_mkl2017 = g;
size_t f_dimension = dimension + (g != 1);
if (getMKLBuildDate() < 20160701) {
g_mkl2017 = 1;
f_dimension = dimension;
}
size_t fdata_sizes[5] = { kw, kh, ic / g, oc / g_mkl2017, g_mkl2017 };
size_t fdata_strides[5] = { 1, kw, kw*kh, kw*kh*ic / g, kw*kh*ic / g*oc / g };
size_t bias_sizes[1] = { oc };
size_t bias_strides[1] = { 1 };
size_t tdata_sizes[4] = { ow, oh, oc, n };
size_t tdata_strides[4] = { 1, ow, ow*oh, ow*oh*oc };
size_t convolutionStrides[2] = { this->stride_w_, this->stride_h_ };
int inputOffset[2] = { -this->pad_w_, -this->pad_h_ };
// Names are for debugging purposes only.
/*** convolution section ***/
if (!param_.no_bias) {
status = dnnGroupsConvolutionCreateForwardBias<DType>(&convolutionFwd,
NULL,
dnnAlgorithmConvolutionDirect,
g,
dimension,
bdata_sizes,
tdata_sizes,
fdata_sizes,
convolutionStrides,
inputOffset,
dnnBorderZeros);
} else {
status = dnnGroupsConvolutionCreateForward<DType>(&convolutionFwd,
NULL,
dnnAlgorithmConvolutionDirect,
g,
dimension,
bdata_sizes,
tdata_sizes,
fdata_sizes,
convolutionStrides,
inputOffset,
dnnBorderZeros);
}
CHECK_EQ(status, 0)
<< "Failed dnnCreateConvolution<DType>(dnnForward) with status "
<< status << "\n";
fwd_bottom_data->create_layouts(convolutionFwd, dnnResourceSrc, dimension,
bdata_sizes, bdata_strides);
fwd_top_data->create_layouts(convolutionFwd, dnnResourceDst, dimension,
tdata_sizes, tdata_strides);
fwd_filter_data->create_layouts(convolutionFwd, dnnResourceFilter,
f_dimension, fdata_sizes, fdata_strides);
if (!param_.no_bias)
fwd_bias_data->create_layouts(convolutionFwd, dnnResourceBias, 1,
bias_sizes, bias_strides);
/*
* Backward by data layer setup
*/
status = dnnGroupsConvolutionCreateBackwardData<DType>(&convolutionBwdData,
NULL,
dnnAlgorithmConvolutionDirect,
g,
dimension,
bdata_sizes,
tdata_sizes,
fdata_sizes,
convolutionStrides,
inputOffset,
dnnBorderZeros);
CHECK_EQ(status, 0)
<< "Failed dnnConvolutionCreateBackwardData with status "
<< status << "\n";
bwdd_bottom_diff->create_layouts(convolutionBwdData, dnnResourceDiffSrc,
dimension, bdata_sizes, bdata_strides);
bwdd_top_diff->create_layouts(convolutionBwdData, dnnResourceDiffDst,
dimension, tdata_sizes, tdata_strides);
bwdd_filter_data->create_layouts(convolutionBwdData, dnnResourceFilter,
f_dimension, fdata_sizes, fdata_strides);
/*
* Backward by filter layer setup
*/
status = dnnGroupsConvolutionCreateBackwardFilter<DType>(&convolutionBwdFilter,
NULL,
dnnAlgorithmConvolutionDirect,
g,
dimension,
bdata_sizes,
tdata_sizes,
fdata_sizes,
convolutionStrides,
inputOffset,
dnnBorderZeros);
CHECK_EQ(status, 0)
<< "Failed dnnConvolutionCreateBackwardFilter with status "
<< status << "\n";
bwdf_bottom_data->create_layouts(convolutionBwdFilter, dnnResourceSrc,
dimension, bdata_sizes, bdata_strides);
bwdf_top_diff->create_layouts(convolutionBwdFilter, dnnResourceDiffDst,
dimension, tdata_sizes, tdata_strides);
bwdf_filter_diff->create_layouts(convolutionBwdFilter, dnnResourceDiffFilter,
f_dimension, fdata_sizes, fdata_strides);
/*
* Backward by bias layer setup
*/
if (!param_.no_bias) {
status = dnnGroupsConvolutionCreateBackwardBias<DType>(&convolutionBwdBias,
NULL,
dnnAlgorithmConvolutionDirect,
g,
dimension,
tdata_sizes);
CHECK_EQ(status, 0)
<< "Failed dnnConvolutionCreateBackwardBias with status "
<< status << "\n";
bwdb_top_diff->create_layouts(convolutionBwdBias, dnnResourceDiffDst,
dimension, tdata_sizes, tdata_strides);
bwdb_bias_diff->create_layouts(convolutionBwdBias, dnnResourceDiffBias, 1,
bias_sizes, bias_strides);
}
}
public:
virtual void Forward(const OpContext &ctx,
const std::vector<TBlob> &in_data,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &out_data,
const std::vector<TBlob> &aux_args) {
using namespace mshadow;
Stream<xpu> *s = ctx.get_stream<xpu>();
DType *data_ptr = NULL;
DType *wmat_ptr = NULL;
DType *out_ptr = NULL;
Tensor<xpu, 4, DType> data =
mkl_experimental_direct_get<xpu, 4, DType>(in_data[conv::kData], s);
Tensor<xpu, 4, DType> out =
mkl_experimental_direct_get<xpu, 4, DType>(out_data[conv::kOut], s);
Tensor<xpu, 4, DType> wmat =
mkl_experimental_direct_get<xpu, 4, DType>(in_data[conv::kWeight], s);
if (!init_mkldnn_) {
LayerSetUp(data, out);
init_mkldnn_ = true;
}
CHECK_EQ(data.CheckContiguous(), true);
CHECK_EQ(wmat.CheckContiguous(), true);
CHECK_EQ(out.CheckContiguous(), true);
data_ptr = data.dptr_;
wmat_ptr = wmat.dptr_;
out_ptr = out.dptr_;
int status;
void *res_convolutionFwd[dnnResourceNumber];
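// dnnExecute runs a primitive against an array of resource pointers
// indexed by dnnResource* tags; get_converted_prv converts each input
// into MKL's private (prv) layout when needed.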
std::shared_ptr<MKLMemHolder> in_data_mem =
#if MKL_EXPERIMENTAL == 1
in_data[conv::kData].Mkl_mem_;
#else
NULL;
#endif
res_convolutionFwd[dnnResourceSrc] =
fwd_bottom_data->get_converted_prv(data_ptr, false, in_data_mem);
std::shared_ptr<MKLMemHolder> in_weight_mem =
#if MKL_EXPERIMENTAL == 1
in_data[conv::kWeight].Mkl_mem_;
#else
NULL;
#endif
res_convolutionFwd[dnnResourceFilter] =
fwd_filter_data->get_converted_prv(wmat_ptr, true, in_weight_mem);
if (!param_.no_bias) {
Tensor<xpu, 1, DType> bias =
mkl_experimental_direct_get<xpu, 1, DType>(in_data[conv::kBias], s);
std::shared_ptr<MKLMemHolder> in_bias_mem =
#if MKL_EXPERIMENTAL == 1
in_data[conv::kBias].Mkl_mem_;
#else
NULL;
#endif
res_convolutionFwd[dnnResourceBias] =
fwd_bias_data->get_converted_prv(bias.dptr_, true, in_bias_mem);
}
std::shared_ptr<MKLMemHolder> top_mem =
#if MKL_EXPERIMENTAL == 1
out_data[conv::kOut].Mkl_mem_;
#else
NULL;
#endif
if (fwd_top_data->conversion_needed()) {
res_convolutionFwd[dnnResourceDst] =
reinterpret_cast<void *>(fwd_top_data->prv_ptr());
#if MKL_EXPERIMENTAL == 1
top_mem->set_prv_descriptor(fwd_top_data);
#endif
} else {
res_convolutionFwd[dnnResourceDst] = out_ptr;
}
status = dnnExecute<DType>(convolutionFwd, res_convolutionFwd);
CHECK_EQ(status, 0) << "Forward convolution failed with status " << status;
#if MKL_EXPERIMENTAL == 0
if (fwd_top_data->conversion_needed()) {
fwd_top_data->convert_from_prv(out_ptr);
}
#endif
}
void AddToModeAllocAndStoreBuffer(void *src, int blob_size, Storage::Handle *pws) {
int blob_byte_size = blob_size * sizeof(DType);
*pws = Storage::Get()->Alloc(blob_byte_size, Context::CPU());
memcpy(pws->dptr, src, blob_byte_size);
}
void AddToModeAddAndReleaseBuffer(Storage::Handle *pws, void *dst_, int blob_size) {
DType *dst = reinterpret_cast<DType*>(dst_);
DType *src = reinterpret_cast<DType*>(pws->dptr);
#pragma omp parallel for
for (int i = 0; i < blob_size; i++) {
dst[i] += src[i];
}
if (pws->dptr)
Storage::Get()->Free(*pws);
pws->dptr = NULL;
}
virtual void Backward(const OpContext &ctx,
const std::vector<TBlob> &out_grad,
const std::vector<TBlob> &in_data,
const std::vector<TBlob> &out_data,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &in_grad,
const std::vector<TBlob> &aux_args) {
using namespace mshadow;
if (param_.kernel.ndim() > 2) {
LOG(FATAL) << "Volume convolution is not implemented in mshadow";
}
CHECK_EQ(out_grad.size(), 1);
size_t expected = param_.no_bias == 0 ? 3 : 2;
CHECK(in_data.size() == expected && in_grad.size() == expected);
CHECK_EQ(req.size(), expected);
CHECK_EQ(in_data[conv::kWeight].CheckContiguous(), true);
Stream<xpu> *s = ctx.get_stream<xpu>();
Tensor<xpu, 4, DType> data =
mkl_experimental_direct_get<xpu, 4, DType>(in_data[conv::kData], s);
Shape<3> wmat_shape =
Shape3(param_.num_group,
param_.num_filter / param_.num_group,
data.shape_[1] / param_.num_group * param_.kernel[0] * param_.kernel[1]);
Tensor<xpu, 3, DType> wmat =
mkl_experimental_direct_get_with_shape<xpu, 3, DType>(
in_data[conv::kWeight], wmat_shape, s);
Tensor<xpu, 4, DType> grad =
mkl_experimental_direct_get<xpu, 4, DType>(out_grad[conv::kOut], s);
Tensor<xpu, 4, DType> gdata =
mkl_experimental_direct_get<xpu, 4, DType>(in_grad[conv::kData], s);
Tensor<xpu, 3, DType> gwmat =
mkl_experimental_direct_get_with_shape<xpu, 3, DType>(
in_grad[conv::kWeight], wmat_shape, s);
if (!init_mkldnn_) {
init_mkldnn_ = true;
LayerSetUp(data, grad);
}
int status;
if (req[0]) {
void *res_convolutionBwdData[dnnResourceNumber];
std::shared_ptr<MKLMemHolder> out_grad_mem =
#if MKL_EXPERIMENTAL == 1
out_grad[conv::kOut].Mkl_mem_;
#else
NULL;
#endif
res_convolutionBwdData[dnnResourceDiffDst] =
bwdd_top_diff->get_converted_prv(grad.dptr_, true, out_grad_mem);
std::shared_ptr<MKLMemHolder> in_weight_mem =
#if MKL_EXPERIMENTAL == 1
in_data[conv::kWeight].Mkl_mem_;
#else
NULL;
#endif
res_convolutionBwdData[dnnResourceFilter] =
bwdd_filter_data->get_converted_prv(wmat.dptr_, false, in_weight_mem);
Storage::Handle addtoWorkspace;
if (bwdd_bottom_diff->conversion_needed()) {
res_convolutionBwdData[dnnResourceDiffSrc] =
reinterpret_cast<void *>(bwdd_bottom_diff->prv_ptr());
#if MKL_EXPERIMENTAL == 1
std::shared_ptr<MKLMemHolder> bottom_diff_mem =
in_grad[conv::kData].Mkl_mem_;
bottom_diff_mem->set_prv_descriptor(bwdd_bottom_diff);
#endif
} else {
res_convolutionBwdData[dnnResourceDiffSrc] = gdata.dptr_;
if (req[0] == kAddTo) {
// wait mkl support addto mode
AddToModeAllocAndStoreBuffer(gdata.dptr_, in_grad[conv::kData].Size(), &addtoWorkspace);
}
}
status = dnnExecute<DType>(convolutionBwdData, res_convolutionBwdData);
CHECK_EQ(status, 0) << "Backward Data conv failed with status " << status;
#if MKL_EXPERIMENTAL == 0
if (bwdd_bottom_diff->conversion_needed()) {
bwdd_bottom_diff->convert_from_prv(gdata.dptr_);
}
#endif
if (!bwdd_bottom_diff->conversion_needed() && req[0] == kAddTo) {
AddToModeAddAndReleaseBuffer(&addtoWorkspace, gdata.dptr_, in_grad[conv::kData].Size());
}
}
if (req[1]) {
void *res_convolutionBwdFilter[dnnResourceNumber];
std::shared_ptr<MKLMemHolder> out_grad_mem =
#if MKL_EXPERIMENTAL == 1
out_grad[conv::kOut].Mkl_mem_;
#else
NULL;
#endif
res_convolutionBwdFilter[dnnResourceDiffDst] =
bwdf_top_diff->get_converted_prv(grad.dptr_, true, out_grad_mem);
#if MKL_EXPERIMENTAL == 1
std::shared_ptr<MKLMemHolder> in_data_mem = in_data[conv::kData].Mkl_mem_;
#else
std::shared_ptr<MKLMemHolder> in_data_mem = NULL;
#endif
res_convolutionBwdFilter[dnnResourceSrc] =
bwdf_bottom_data->get_converted_prv(data.dptr_, false,
in_data_mem);
Storage::Handle addtoWorkspace;
if (bwdf_filter_diff->conversion_needed()) {
#if MKL_EXPERIMENTAL == 1
std::shared_ptr<MKLMemHolder> gwamt_mem =
in_grad[conv::kWeight].Mkl_mem_;
gwamt_mem->set_prv_descriptor(bwdf_filter_diff);
#endif
res_convolutionBwdFilter[dnnResourceDiffFilter] =
reinterpret_cast<void *>(bwdf_filter_diff->prv_ptr());
} else {
res_convolutionBwdFilter[dnnResourceDiffFilter] = gwmat.dptr_;
if (req[1] == kAddTo) {  // req[1] is the weight-gradient request
// wait mkl support addto mode
AddToModeAllocAndStoreBuffer(gwmat.dptr_, in_grad[conv::kWeight].Size(), &addtoWorkspace);
}
}
status = dnnExecute<DType>(convolutionBwdFilter, res_convolutionBwdFilter);
CHECK_EQ(status, 0) << "Backward Filter conv failed with status " << status;
#if MKL_EXPERIMENTAL == 0
if (bwdf_filter_diff->conversion_needed()) {
bwdf_filter_diff->convert_from_prv(gwmat.dptr_);
}
#endif
if (!bwdf_filter_diff->conversion_needed() && req[1] == kAddTo) {
AddToModeAddAndReleaseBuffer(&addtoWorkspace, gwmat.dptr_, in_grad[conv::kWeight].Size());
}
}
if (!param_.no_bias) {
Tensor<xpu, 1, DType> gbias =
mkl_experimental_direct_get<xpu, 1, DType>(in_grad[conv::kBias], s);
void *res_convolutionBwdBias[dnnResourceNumber];
std::shared_ptr<MKLMemHolder> out_grad_mem =
#if MKL_EXPERIMENTAL == 1
out_grad[conv::kOut].Mkl_mem_;
#else
NULL;
#endif
res_convolutionBwdBias[dnnResourceDiffDst] =
bwdb_top_diff->get_converted_prv(grad.dptr_, true, out_grad_mem);
if (bwdb_bias_diff->conversion_needed()) {
#if MKL_EXPERIMENTAL == 1
std::shared_ptr<MKLMemHolder> gbias_mem = in_grad[conv::kBias].Mkl_mem_;
gbias_mem->set_prv_descriptor(bwdb_bias_diff);
#endif
res_convolutionBwdBias[dnnResourceDiffBias] =
bwdb_bias_diff->prv_ptr();
} else {
res_convolutionBwdBias[dnnResourceDiffBias] =
reinterpret_cast<void *>(gbias.dptr_);
}
status = dnnExecute<DType>(convolutionBwdBias, res_convolutionBwdBias);
CHECK_EQ(status, 0) << "Backward Bias failed with status " << status;
#if MKL_EXPERIMENTAL == 0
if (bwdb_bias_diff->conversion_needed()) {
bwdb_bias_diff->convert_from_prv(gbias.dptr_);
}
#endif
}
}
private:
ConvolutionParam param_;
size_t width_,
height_,
width_out_,
height_out_,
kernel_w_,
kernel_h_,
stride_w_,
stride_h_;
int group_,
num_,
num_output_;
size_t channels_;
int pad_w_,
pad_h_;
bool init_mkldnn_;
dnnPrimitive_t convolutionFwd;
dnnPrimitive_t convolutionBwdData;
dnnPrimitive_t convolutionBwdFilter;
dnnPrimitive_t convolutionBwdBias;
/* Fwd step */
std::shared_ptr<MKLData<DType> > fwd_bottom_data, fwd_top_data, fwd_filter_data,
fwd_bias_data;
/* Bwd data step */
std::shared_ptr<MKLData<DType> > bwdd_top_diff, bwdd_bottom_diff;
std::shared_ptr<MKLData<DType> > bwdd_filter_data;
/* Bwd filter step */
std::shared_ptr<MKLData<DType> > bwdf_top_diff, bwdf_filter_diff;
std::shared_ptr<MKLData<DType> > bwdf_bottom_data;
std::shared_ptr<MKLData<DType> > bwdf_filter_diff_iter, bwdf2fwd_filter_diff,
bwdb_bias_diff_iter;
/* Bwd bias step */
std::shared_ptr<MKLData<DType> > bwdb_top_diff, bwdb_bias_diff;
}; // class MKLConvolutionOp
} // namespace op
} // namespace mxnet
#endif // MXNET_OPERATOR_MKL_MKL_CONVOLUTION_INL_H_
pocketfft_hdronly.h

/*
This file is part of pocketfft.
Copyright (C) 2010-2019 Max-Planck-Society
Copyright (C) 2019 Peter Bell
For the odd-sized DCT-IV transforms:
Copyright (C) 2003, 2007-14 Matteo Frigo
Copyright (C) 2003, 2007-14 Massachusetts Institute of Technology
Authors: Martin Reinecke, Peter Bell
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its contributors may
be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef POCKETFFT_HDRONLY_H
#define POCKETFFT_HDRONLY_H
#ifndef __cplusplus
#error This file is C++ and requires a C++ compiler.
#endif
#if !(__cplusplus >= 201103L || _MSVC_LANG+0L >= 201103L)
#error This file requires at least C++11 support.
#endif
#ifndef POCKETFFT_CACHE_SIZE
#define POCKETFFT_CACHE_SIZE 16
#endif
#include <cmath>
#include <cstring>
#include <cstdlib>
#include <stdexcept>
#include <memory>
#include <vector>
#include <complex>
#if POCKETFFT_CACHE_SIZE!=0
#include <array>
#include <mutex>
#endif
#ifdef POCKETFFT_OPENMP
#include <omp.h>
#endif
#if defined(__GNUC__)
#define POCKETFFT_NOINLINE __attribute__((noinline))
#define POCKETFFT_RESTRICT __restrict__
#elif defined(_MSC_VER)
#define POCKETFFT_NOINLINE __declspec(noinline)
#define POCKETFFT_RESTRICT __restrict
#else
#define POCKETFFT_NOINLINE
#define POCKETFFT_RESTRICT
#endif
namespace pocketfft {
namespace detail {
using namespace std;
using shape_t = vector<size_t>;
using stride_t = vector<ptrdiff_t>;
constexpr bool FORWARD = true,
BACKWARD = false;
// only enable vector support for gcc>=5.0 and clang>=5.0
#ifndef POCKETFFT_NO_VECTORS
#define POCKETFFT_NO_VECTORS
#if defined(__INTEL_COMPILER)
// do nothing. This is necessary because this compiler also sets __GNUC__.
#elif defined(__clang__)
#if __clang__>=5
#undef POCKETFFT_NO_VECTORS
#endif
#elif defined(__GNUC__)
#if __GNUC__>=5
#undef POCKETFFT_NO_VECTORS
#endif
#endif
#endif
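// VLEN<T>::val: number of elements of type T in one SIMD vector on this target.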
template<typename T> struct VLEN { static constexpr size_t val=1; };
#ifndef POCKETFFT_NO_VECTORS
#if (defined(__AVX512F__))
template<> struct VLEN<float> { static constexpr size_t val=16; };
template<> struct VLEN<double> { static constexpr size_t val=8; };
#elif (defined(__AVX__))
template<> struct VLEN<float> { static constexpr size_t val=8; };
template<> struct VLEN<double> { static constexpr size_t val=4; };
#elif (defined(__SSE2__))
template<> struct VLEN<float> { static constexpr size_t val=4; };
template<> struct VLEN<double> { static constexpr size_t val=2; };
#elif (defined(__VSX__))
template<> struct VLEN<float> { static constexpr size_t val=4; };
template<> struct VLEN<double> { static constexpr size_t val=2; };
#else
#define POCKETFFT_NO_VECTORS
#endif
#endif
template<typename T> class arr
{
private:
T *p;
size_t sz;
#if defined(POCKETFFT_NO_VECTORS)
static T *ralloc(size_t num)
{
if (num==0) return nullptr;
void *res = malloc(num*sizeof(T));
if (!res) throw bad_alloc();
return reinterpret_cast<T *>(res);
}
static void dealloc(T *ptr)
{ free(ptr); }
#elif __cplusplus >= 201703L
static T *ralloc(size_t num)
{
if (num==0) return nullptr;
void *res = aligned_alloc(64,num*sizeof(T));
if (!res) throw bad_alloc();
return reinterpret_cast<T *>(res);
}
static void dealloc(T *ptr)
{ free(ptr); }
#else // portable emulation
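// Over-allocate by 64 bytes, round up to the next 64-byte boundary, and
// stash the original malloc() pointer in the slot just before the aligned
// block so dealloc() can recover and free it.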
static T *ralloc(size_t num)
{
if (num==0) return nullptr;
void *ptr = malloc(num*sizeof(T)+64);
if (!ptr) throw bad_alloc();
T *res = reinterpret_cast<T *>
((reinterpret_cast<size_t>(ptr) & ~(size_t(63))) + 64);
(reinterpret_cast<void**>(res))[-1] = ptr;
return res;
}
static void dealloc(T *ptr)
{ if (ptr) free((reinterpret_cast<void**>(ptr))[-1]); }
#endif
public:
arr() : p(0), sz(0) {}
arr(size_t n) : p(ralloc(n)), sz(n) {}
arr(arr &&other)
: p(other.p), sz(other.sz)
{ other.p=nullptr; other.sz=0; }
~arr() { dealloc(p); }
void resize(size_t n)
{
if (n==sz) return;
dealloc(p);
p = ralloc(n);
sz = n;
}
T &operator[](size_t idx) { return p[idx]; }
const T &operator[](size_t idx) const { return p[idx]; }
T *data() { return p; }
const T *data() const { return p; }
size_t size() const { return sz; }
};
template<typename T> struct cmplx {
T r, i;
cmplx() {}
cmplx(T r_, T i_) : r(r_), i(i_) {}
void Set(T r_, T i_) { r=r_; i=i_; }
void Set(T r_) { r=r_; i=T(0); }
cmplx &operator+= (const cmplx &other)
{ r+=other.r; i+=other.i; return *this; }
template<typename T2>cmplx &operator*= (T2 other)
{ r*=other; i*=other; return *this; }
template<typename T2>cmplx &operator*= (const cmplx<T2> &other)
{
T tmp = r*other.r - i*other.i;
i = r*other.i + i*other.r;
r = tmp;
return *this;
}
cmplx operator+ (const cmplx &other) const
{ return cmplx(r+other.r, i+other.i); }
cmplx operator- (const cmplx &other) const
{ return cmplx(r-other.r, i-other.i); }
template<typename T2> auto operator* (const T2 &other) const
-> cmplx<decltype(r*other)>
{ return {r*other, i*other}; }
template<typename T2> auto operator* (const cmplx<T2> &other) const
-> cmplx<decltype(r+other.r)>
{ return {r*other.r-i*other.i, r*other.i + i*other.r}; }
template<bool fwd, typename T2> auto special_mul (const cmplx<T2> &other) const
-> cmplx<decltype(r+other.r)>
{
using Tres = cmplx<decltype(r+other.r)>;
return fwd ? Tres(r*other.r+i*other.i, i*other.r-r*other.i)
: Tres(r*other.r-i*other.i, r*other.i+i*other.r);
}
};
template<typename T> void PMC(cmplx<T> &a, cmplx<T> &b,
const cmplx<T> &c, const cmplx<T> &d)
{ a = c+d; b = c-d; }
template<typename T> cmplx<T> conj(const cmplx<T> &a)
{ return {a.r, -a.i}; }
template<typename T> void ROT90(cmplx<T> &a)
{ auto tmp_=a.r; a.r=-a.i; a.i=tmp_; }
template<bool fwd, typename T> void ROTX90(cmplx<T> &a)
{ auto tmp_= fwd ? -a.r : a.r; a.r = fwd ? a.i : -a.i; a.i=tmp_; }
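// ROT90 multiplies a by i; ROTX90 multiplies by -i for the forward
// transform and by +i for the backward one.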
//
// twiddle factor section
//
template<typename T> class sincos_2pibyn
{
private:
using Thigh = typename conditional<(sizeof(T)>sizeof(double)), T, double>::type;
arr<T> data;
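// Computes cos(pi*a_) - 1 (res[0]) and sin(pi*a_) (res[1]); returning
// cos-1 instead of cos preserves accuracy for small angles.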
void my_sincosm1pi (Thigh a_, Thigh *POCKETFFT_RESTRICT res)
{
if (sizeof(Thigh)>sizeof(double)) // don't have the code for long double
{
constexpr Thigh pi = Thigh(3.141592653589793238462643383279502884197L);
auto s = sin(pi*a_);
res[1] = s;
res[0] = (s*s)/(-sqrt((1-s)*(1+s))-1);
return;
}
// adapted from https://stackoverflow.com/questions/42792939/
// CAUTION: this function only works for arguments in the range
// [-0.25; 0.25]!
double a = double(a_);
double s = a * a;
/* Approximate cos(pi*x)-1 for x in [-0.25,0.25] */
double r = -1.0369917389758117e-4;
r = fma (r, s, 1.9294935641298806e-3);
r = fma (r, s, -2.5806887942825395e-2);
r = fma (r, s, 2.3533063028328211e-1);
r = fma (r, s, -1.3352627688538006e+0);
r = fma (r, s, 4.0587121264167623e+0);
r = fma (r, s, -4.9348022005446790e+0);
double c = r*s;
/* Approximate sin(pi*x) for x in [-0.25,0.25] */
r = 4.6151442520157035e-4;
r = fma (r, s, -7.3700183130883555e-3);
r = fma (r, s, 8.2145868949323936e-2);
r = fma (r, s, -5.9926452893214921e-1);
r = fma (r, s, 2.5501640398732688e+0);
r = fma (r, s, -5.1677127800499516e+0);
s = s * a;
r = r * s;
s = fma (a, 3.1415926535897931e+0, r);
res[0] = c;
res[1] = s;
}
POCKETFFT_NOINLINE void calc_first_octant(size_t den,
T * POCKETFFT_RESTRICT res)
{
size_t n = (den+4)>>3;
if (n==0) return;
res[0]=1.; res[1]=0.;
if (n==1) return;
size_t l1 = size_t(sqrt(n));
arr<Thigh> tmp(2*l1);
for (size_t i=1; i<l1; ++i)
{
my_sincosm1pi(Thigh(2*i)/Thigh(den),&tmp[2*i]);
res[2*i ] = T(tmp[2*i]+1);
res[2*i+1] = T(tmp[2*i+1]);
}
size_t start=l1;
while(start<n)
{
Thigh cs[2];
my_sincosm1pi((Thigh(2*start))/Thigh(den),cs);
res[2*start] = T(cs[0]+1);
res[2*start+1] = T(cs[1]);
size_t end = l1;
if (start+end>n) end = n-start;
for (size_t i=1; i<end; ++i)
{
Thigh csx[2]={tmp[2*i], tmp[2*i+1]};
res[2*(start+i)] = T(((cs[0]*csx[0] - cs[1]*csx[1] + cs[0]) + csx[0]) + 1);
res[2*(start+i)+1] = T((cs[0]*csx[1] + cs[1]*csx[0]) + cs[1] + csx[1]);
}
start += l1;
}
}
void calc_first_quadrant(size_t n, T * POCKETFFT_RESTRICT res)
{
T * POCKETFFT_RESTRICT p = res+n;
calc_first_octant(n<<1, p);
size_t ndone=(n+2)>>2;
size_t i=0, idx1=0, idx2=2*ndone-2;
for (; i+1<ndone; i+=2, idx1+=2, idx2-=2)
{
res[idx1] = p[2*i ]; res[idx1+1] = p[2*i+1];
res[idx2] = p[2*i+3]; res[idx2+1] = p[2*i+2];
}
if (i!=ndone)
{ res[idx1] = p[2*i]; res[idx1+1] = p[2*i+1]; }
}
void calc_first_half(size_t n, T * POCKETFFT_RESTRICT res)
{
int ndone=int(n+1)>>1;
T * p = res+n-1;
calc_first_octant(n<<2, p);
int i4=0, in=int(n), i=0;
for (; i4<=in-i4; ++i, i4+=4) // octant 0
{ res[2*i] = p[2*i4]; res[2*i+1] = p[2*i4+1]; }
for (; i4-in <= 0; ++i, i4+=4) // octant 1
{ auto xm = in-i4; res[2*i] = p[2*xm+1]; res[2*i+1] = p[2*xm]; }
for (; i4<=3*in-i4; ++i, i4+=4) // octant 2
{ auto xm = i4-in; res[2*i] = -p[2*xm+1]; res[2*i+1] = p[2*xm]; }
for (; i<ndone; ++i, i4+=4) // octant 3
{ auto xm = 2*in-i4; res[2*i] = -p[2*xm]; res[2*i+1] = p[2*xm+1]; }
}
void fill_first_quadrant(size_t n, T * POCKETFFT_RESTRICT res)
{
constexpr T hsqt2 = T(0.707106781186547524400844362104849L);
size_t quart = n>>2;
if ((n&7)==0)
res[quart] = res[quart+1] = hsqt2;
for (size_t i=2, j=2*quart-2; i<quart; i+=2, j-=2)
{ res[j] = res[i+1]; res[j+1] = res[i]; }
}
POCKETFFT_NOINLINE void fill_first_half(size_t n, T * POCKETFFT_RESTRICT res)
{
size_t half = n>>1;
if ((n&3)==0)
for (size_t i=0; i<half; i+=2)
{ res[i+half] = -res[i+1]; res[i+half+1] = res[i]; }
else
for (size_t i=2, j=2*half-2; i<half; i+=2, j-=2)
{ res[j] = -res[i]; res[j+1] = res[i+1]; }
}
void fill_second_half(size_t n, T * POCKETFFT_RESTRICT res)
{
if ((n&1)==0)
for (size_t i=0; i<n; ++i)
res[i+n] = -res[i];
else
for (size_t i=2, j=2*n-2; i<n; i+=2, j-=2)
{ res[j] = res[i]; res[j+1] = -res[i+1]; }
}
POCKETFFT_NOINLINE void sincos_2pibyn_half(size_t n, T * POCKETFFT_RESTRICT res)
{
if ((n&3)==0)
{
calc_first_octant(n, res);
fill_first_quadrant(n, res);
fill_first_half(n, res);
}
else if ((n&1)==0)
{
calc_first_quadrant(n, res);
fill_first_half(n, res);
}
else
calc_first_half(n, res);
}
public:
POCKETFFT_NOINLINE sincos_2pibyn(size_t n, bool half)
: data(2*n)
{
sincos_2pibyn_half(n, data.data());
if (!half) fill_second_half(n, data.data());
}
T operator[](size_t idx) const { return data[idx]; }
const T *rdata() const { return data.data(); }
const cmplx<T> *cdata() const
{ return reinterpret_cast<const cmplx<T> *>(data.data()); }
};
struct util // hack to avoid duplicate symbols
{
static POCKETFFT_NOINLINE size_t largest_prime_factor (size_t n)
{
size_t res=1;
while ((n&1)==0)
{ res=2; n>>=1; }
for (size_t x=3; x*x<=n; x+=2)
while ((n%x)==0)
{ res=x; n/=x; }
if (n>1) res=n;
return res;
}
static POCKETFFT_NOINLINE double cost_guess (size_t n)
{
constexpr double lfp=1.1; // penalty for non-hardcoded larger factors
size_t ni=n;
double result=0.;
while ((n&1)==0)
{ result+=2; n>>=1; }
for (size_t x=3; x*x<=n; x+=2)
while ((n%x)==0)
{
result+= (x<=5) ? double(x) : lfp*double(x); // penalize larger prime factors
n/=x;
}
if (n>1) result+=(n<=5) ? double(n) : lfp*double(n);
return result*double(ni);
}
/* returns the smallest composite of 2, 3, 5, 7 and 11 which is >= n */
static POCKETFFT_NOINLINE size_t good_size(size_t n)
{
if (n<=12) return n;
size_t bestfac=2*n;
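// enumerate all 2^a*3^b*5^c*7^d*11^e candidates below the current best
// and keep the smallest one that is >= n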
for (size_t f2=1; f2<bestfac; f2*=2)
for (size_t f23=f2; f23<bestfac; f23*=3)
for (size_t f235=f23; f235<bestfac; f235*=5)
for (size_t f2357=f235; f2357<bestfac; f2357*=7)
for (size_t f235711=f2357; f235711<bestfac; f235711*=11)
if (f235711>=n) bestfac=f235711;
return bestfac;
}
static size_t prod(const shape_t &shape)
{
size_t res=1;
for (auto sz: shape)
res*=sz;
return res;
}
static POCKETFFT_NOINLINE void sanity_check(const shape_t &shape,
const stride_t &stride_in, const stride_t &stride_out, bool inplace)
{
auto ndim = shape.size();
if (ndim<1) throw runtime_error("ndim must be >= 1");
if ((stride_in.size()!=ndim) || (stride_out.size()!=ndim))
throw runtime_error("stride dimension mismatch");
if (inplace && (stride_in!=stride_out))
throw runtime_error("stride mismatch");
}
static POCKETFFT_NOINLINE void sanity_check(const shape_t &shape,
const stride_t &stride_in, const stride_t &stride_out, bool inplace,
const shape_t &axes)
{
sanity_check(shape, stride_in, stride_out, inplace);
auto ndim = shape.size();
shape_t tmp(ndim,0);
for (auto ax : axes)
{
if (ax>=ndim) throw invalid_argument("bad axis number");
if (++tmp[ax]>1) throw invalid_argument("axis specified repeatedly");
}
}
static POCKETFFT_NOINLINE void sanity_check(const shape_t &shape,
const stride_t &stride_in, const stride_t &stride_out, bool inplace,
size_t axis)
{
sanity_check(shape, stride_in, stride_out, inplace);
if (axis>=shape.size()) throw invalid_argument("bad axis number");
}
#ifdef POCKETFFT_OPENMP
static size_t nthreads() { return size_t(omp_get_num_threads()); }
static size_t thread_num() { return size_t(omp_get_thread_num()); }
static size_t thread_count (size_t nthreads, const shape_t &shape,
size_t axis)
{
if (nthreads==1) return 1;
if (prod(shape) < 20*shape[axis]) return 1;
return (nthreads==0) ? size_t(omp_get_max_threads()) : nthreads;
}
#else
static constexpr size_t nthreads() { return 1; }
static constexpr size_t thread_num() { return 0; }
#endif
};
//
// complex FFTPACK transforms
//
template<typename T0> class cfftp
{
private:
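// factorization entry: one radix factor (fct) plus its twiddle tables
// (tw for the specialized passes, tws for the generic pass)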
struct fctdata
{
size_t fct;
cmplx<T0> *tw, *tws;
};
size_t length;
arr<cmplx<T0>> mem;
vector<fctdata> fact;
void add_factor(size_t factor)
{ fact.push_back({factor, nullptr, nullptr}); }
template<bool fwd, typename T> void pass2 (size_t ido, size_t l1,
const T * POCKETFFT_RESTRICT cc, T * POCKETFFT_RESTRICT ch,
const cmplx<T0> * POCKETFFT_RESTRICT wa)
{
constexpr size_t cdim=2;
auto CH = [ch,ido,l1](size_t a, size_t b, size_t c) -> T&
{ return ch[a+ido*(b+l1*c)]; };
auto CC = [cc,ido,cdim](size_t a, size_t b, size_t c) -> const T&
{ return cc[a+ido*(b+cdim*c)]; };
auto WA = [wa, ido](size_t x, size_t i)
{ return wa[i-1+x*(ido-1)]; };
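// CC views the input as (ido, cdim, l1) with the first index fastest;
// CH views the output as (ido, l1, radix); WA looks up twiddle factor
// x for intra-transform index i.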
if (ido==1)
for (size_t k=0; k<l1; ++k)
{
CH(0,k,0) = CC(0,0,k)+CC(0,1,k);
CH(0,k,1) = CC(0,0,k)-CC(0,1,k);
}
else
for (size_t k=0; k<l1; ++k)
{
CH(0,k,0) = CC(0,0,k)+CC(0,1,k);
CH(0,k,1) = CC(0,0,k)-CC(0,1,k);
for (size_t i=1; i<ido; ++i)
{
CH(i,k,0) = CC(i,0,k)+CC(i,1,k);
CH(i,k,1) = (CC(i,0,k)-CC(i,1,k)).template special_mul<fwd>(WA(0,i));
}
}
}
#define POCKETFFT_PREP3(idx) \
T t0 = CC(idx,0,k), t1, t2; \
PMC (t1,t2,CC(idx,1,k),CC(idx,2,k)); \
CH(idx,k,0)=t0+t1;
#define POCKETFFT_PARTSTEP3a(u1,u2,twr,twi) \
{ \
T ca,cb; \
ca=t0+t1*twr; \
cb=t2*twi; ROT90(cb); \
PMC(CH(0,k,u1),CH(0,k,u2),ca,cb) ;\
}
#define POCKETFFT_PARTSTEP3b(u1,u2,twr,twi) \
{ \
T ca,cb,da,db; \
ca=t0+t1*twr; \
cb=t2*twi; ROT90(cb); \
PMC(da,db,ca,cb); \
CH(i,k,u1) = da.template special_mul<fwd>(WA(u1-1,i)); \
CH(i,k,u2) = db.template special_mul<fwd>(WA(u2-1,i)); \
}
template<bool fwd, typename T> void pass3 (size_t ido, size_t l1,
const T * POCKETFFT_RESTRICT cc, T * POCKETFFT_RESTRICT ch,
const cmplx<T0> * POCKETFFT_RESTRICT wa)
{
constexpr size_t cdim=3;
constexpr T0 tw1r=-0.5,
tw1i= (fwd ? -1: 1) * T0(0.8660254037844386467637231707529362L);
auto CH = [ch,ido,l1](size_t a, size_t b, size_t c) -> T&
{ return ch[a+ido*(b+l1*c)]; };
auto CC = [cc,ido,cdim](size_t a, size_t b, size_t c) -> const T&
{ return cc[a+ido*(b+cdim*c)]; };
auto WA = [wa, ido](size_t x, size_t i)
{ return wa[i-1+x*(ido-1)]; };
if (ido==1)
for (size_t k=0; k<l1; ++k)
{
POCKETFFT_PREP3(0)
POCKETFFT_PARTSTEP3a(1,2,tw1r,tw1i)
}
else
for (size_t k=0; k<l1; ++k)
{
{
POCKETFFT_PREP3(0)
POCKETFFT_PARTSTEP3a(1,2,tw1r,tw1i)
}
for (size_t i=1; i<ido; ++i)
{
POCKETFFT_PREP3(i)
POCKETFFT_PARTSTEP3b(1,2,tw1r,tw1i)
}
}
}
#undef POCKETFFT_PARTSTEP3b
#undef POCKETFFT_PARTSTEP3a
#undef POCKETFFT_PREP3
template<bool fwd, typename T> void pass4 (size_t ido, size_t l1,
const T * POCKETFFT_RESTRICT cc, T * POCKETFFT_RESTRICT ch,
const cmplx<T0> * POCKETFFT_RESTRICT wa)
{
constexpr size_t cdim=4;
auto CH = [ch,ido,l1](size_t a, size_t b, size_t c) -> T&
{ return ch[a+ido*(b+l1*c)]; };
auto CC = [cc,ido,cdim](size_t a, size_t b, size_t c) -> const T&
{ return cc[a+ido*(b+cdim*c)]; };
auto WA = [wa, ido](size_t x, size_t i)
{ return wa[i-1+x*(ido-1)]; };
if (ido==1)
for (size_t k=0; k<l1; ++k)
{
T t1, t2, t3, t4;
PMC(t2,t1,CC(0,0,k),CC(0,2,k));
PMC(t3,t4,CC(0,1,k),CC(0,3,k));
ROTX90<fwd>(t4);
PMC(CH(0,k,0),CH(0,k,2),t2,t3);
PMC(CH(0,k,1),CH(0,k,3),t1,t4);
}
else
for (size_t k=0; k<l1; ++k)
{
{
T t1, t2, t3, t4;
PMC(t2,t1,CC(0,0,k),CC(0,2,k));
PMC(t3,t4,CC(0,1,k),CC(0,3,k));
ROTX90<fwd>(t4);
PMC(CH(0,k,0),CH(0,k,2),t2,t3);
PMC(CH(0,k,1),CH(0,k,3),t1,t4);
}
for (size_t i=1; i<ido; ++i)
{
T c2, c3, c4, t1, t2, t3, t4;
T cc0=CC(i,0,k), cc1=CC(i,1,k),cc2=CC(i,2,k),cc3=CC(i,3,k);
PMC(t2,t1,cc0,cc2);
PMC(t3,t4,cc1,cc3);
ROTX90<fwd>(t4);
PMC(CH(i,k,0),c3,t2,t3);
PMC(c2,c4,t1,t4);
CH(i,k,1) = c2.template special_mul<fwd>(WA(0,i));
CH(i,k,2) = c3.template special_mul<fwd>(WA(1,i));
CH(i,k,3) = c4.template special_mul<fwd>(WA(2,i));
}
}
}
#define POCKETFFT_PREP5(idx) \
T t0 = CC(idx,0,k), t1, t2, t3, t4; \
PMC (t1,t4,CC(idx,1,k),CC(idx,4,k)); \
PMC (t2,t3,CC(idx,2,k),CC(idx,3,k)); \
CH(idx,k,0).r=t0.r+t1.r+t2.r; \
CH(idx,k,0).i=t0.i+t1.i+t2.i;
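/* Note on the POCKETFFT_PARTSTEP* macros below (also in pass7/pass11):
the tw*i arguments are passed with an explicit leading sign, e.g.
"+tw2i,-tw1i", so an expansion such as "twai*t4.r twbi*t3.r" becomes a
well-formed signed sum. */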
#define POCKETFFT_PARTSTEP5a(u1,u2,twar,twbr,twai,twbi) \
{ \
T ca,cb; \
ca.r=t0.r+twar*t1.r+twbr*t2.r; \
ca.i=t0.i+twar*t1.i+twbr*t2.i; \
cb.i=twai*t4.r twbi*t3.r; \
cb.r=-(twai*t4.i twbi*t3.i); \
PMC(CH(0,k,u1),CH(0,k,u2),ca,cb); \
}
#define POCKETFFT_PARTSTEP5b(u1,u2,twar,twbr,twai,twbi) \
{ \
T ca,cb,da,db; \
ca.r=t0.r+twar*t1.r+twbr*t2.r; \
ca.i=t0.i+twar*t1.i+twbr*t2.i; \
cb.i=twai*t4.r twbi*t3.r; \
cb.r=-(twai*t4.i twbi*t3.i); \
PMC(da,db,ca,cb); \
CH(i,k,u1) = da.template special_mul<fwd>(WA(u1-1,i)); \
CH(i,k,u2) = db.template special_mul<fwd>(WA(u2-1,i)); \
}
template<bool fwd, typename T> void pass5 (size_t ido, size_t l1,
const T * POCKETFFT_RESTRICT cc, T * POCKETFFT_RESTRICT ch,
const cmplx<T0> * POCKETFFT_RESTRICT wa)
{
constexpr size_t cdim=5;
constexpr T0 tw1r= T0(0.3090169943749474241022934171828191L),
tw1i= (fwd ? -1: 1) * T0(0.9510565162951535721164393333793821L),
tw2r= T0(-0.8090169943749474241022934171828191L),
tw2i= (fwd ? -1: 1) * T0(0.5877852522924731291687059546390728L);
auto CH = [ch,ido,l1](size_t a, size_t b, size_t c) -> T&
{ return ch[a+ido*(b+l1*c)]; };
auto CC = [cc,ido,cdim](size_t a, size_t b, size_t c) -> const T&
{ return cc[a+ido*(b+cdim*c)]; };
auto WA = [wa, ido](size_t x, size_t i)
{ return wa[i-1+x*(ido-1)]; };
if (ido==1)
for (size_t k=0; k<l1; ++k)
{
POCKETFFT_PREP5(0)
POCKETFFT_PARTSTEP5a(1,4,tw1r,tw2r,+tw1i,+tw2i)
POCKETFFT_PARTSTEP5a(2,3,tw2r,tw1r,+tw2i,-tw1i)
}
else
for (size_t k=0; k<l1; ++k)
{
{
POCKETFFT_PREP5(0)
POCKETFFT_PARTSTEP5a(1,4,tw1r,tw2r,+tw1i,+tw2i)
POCKETFFT_PARTSTEP5a(2,3,tw2r,tw1r,+tw2i,-tw1i)
}
for (size_t i=1; i<ido; ++i)
{
POCKETFFT_PREP5(i)
POCKETFFT_PARTSTEP5b(1,4,tw1r,tw2r,+tw1i,+tw2i)
POCKETFFT_PARTSTEP5b(2,3,tw2r,tw1r,+tw2i,-tw1i)
}
}
}
#undef POCKETFFT_PARTSTEP5b
#undef POCKETFFT_PARTSTEP5a
#undef POCKETFFT_PREP5
#define POCKETFFT_PREP7(idx) \
T t1 = CC(idx,0,k), t2, t3, t4, t5, t6, t7; \
PMC (t2,t7,CC(idx,1,k),CC(idx,6,k)); \
PMC (t3,t6,CC(idx,2,k),CC(idx,5,k)); \
PMC (t4,t5,CC(idx,3,k),CC(idx,4,k)); \
CH(idx,k,0).r=t1.r+t2.r+t3.r+t4.r; \
CH(idx,k,0).i=t1.i+t2.i+t3.i+t4.i;
#define POCKETFFT_PARTSTEP7a0(u1,u2,x1,x2,x3,y1,y2,y3,out1,out2) \
{ \
T ca,cb; \
ca.r=t1.r+x1*t2.r+x2*t3.r+x3*t4.r; \
ca.i=t1.i+x1*t2.i+x2*t3.i+x3*t4.i; \
cb.i=y1*t7.r y2*t6.r y3*t5.r; \
cb.r=-(y1*t7.i y2*t6.i y3*t5.i); \
PMC(out1,out2,ca,cb); \
}
#define POCKETFFT_PARTSTEP7a(u1,u2,x1,x2,x3,y1,y2,y3) \
POCKETFFT_PARTSTEP7a0(u1,u2,x1,x2,x3,y1,y2,y3,CH(0,k,u1),CH(0,k,u2))
#define POCKETFFT_PARTSTEP7(u1,u2,x1,x2,x3,y1,y2,y3) \
{ \
T da,db; \
POCKETFFT_PARTSTEP7a0(u1,u2,x1,x2,x3,y1,y2,y3,da,db) \
CH(i,k,u1) = da.template special_mul<fwd>(WA(u1-1,i)); \
CH(i,k,u2) = db.template special_mul<fwd>(WA(u2-1,i)); \
}
template<bool fwd, typename T> void pass7(size_t ido, size_t l1,
const T * POCKETFFT_RESTRICT cc, T * POCKETFFT_RESTRICT ch,
const cmplx<T0> * POCKETFFT_RESTRICT wa)
{
constexpr size_t cdim=7;
constexpr T0 tw1r= T0(0.6234898018587335305250048840042398L),
tw1i= (fwd ? -1 : 1) * T0(0.7818314824680298087084445266740578L),
tw2r= T0(-0.2225209339563144042889025644967948L),
tw2i= (fwd ? -1 : 1) * T0(0.9749279121818236070181316829939312L),
tw3r= T0(-0.9009688679024191262361023195074451L),
tw3i= (fwd ? -1 : 1) * T0(0.433883739117558120475768332848359L);
auto CH = [ch,ido,l1](size_t a, size_t b, size_t c) -> T&
{ return ch[a+ido*(b+l1*c)]; };
auto CC = [cc,ido,cdim](size_t a, size_t b, size_t c) -> const T&
{ return cc[a+ido*(b+cdim*c)]; };
auto WA = [wa, ido](size_t x, size_t i)
{ return wa[i-1+x*(ido-1)]; };
if (ido==1)
for (size_t k=0; k<l1; ++k)
{
POCKETFFT_PREP7(0)
POCKETFFT_PARTSTEP7a(1,6,tw1r,tw2r,tw3r,+tw1i,+tw2i,+tw3i)
POCKETFFT_PARTSTEP7a(2,5,tw2r,tw3r,tw1r,+tw2i,-tw3i,-tw1i)
POCKETFFT_PARTSTEP7a(3,4,tw3r,tw1r,tw2r,+tw3i,-tw1i,+tw2i)
}
else
for (size_t k=0; k<l1; ++k)
{
{
POCKETFFT_PREP7(0)
POCKETFFT_PARTSTEP7a(1,6,tw1r,tw2r,tw3r,+tw1i,+tw2i,+tw3i)
POCKETFFT_PARTSTEP7a(2,5,tw2r,tw3r,tw1r,+tw2i,-tw3i,-tw1i)
POCKETFFT_PARTSTEP7a(3,4,tw3r,tw1r,tw2r,+tw3i,-tw1i,+tw2i)
}
for (size_t i=1; i<ido; ++i)
{
POCKETFFT_PREP7(i)
POCKETFFT_PARTSTEP7(1,6,tw1r,tw2r,tw3r,+tw1i,+tw2i,+tw3i)
POCKETFFT_PARTSTEP7(2,5,tw2r,tw3r,tw1r,+tw2i,-tw3i,-tw1i)
POCKETFFT_PARTSTEP7(3,4,tw3r,tw1r,tw2r,+tw3i,-tw1i,+tw2i)
}
}
}
#undef POCKETFFT_PARTSTEP7
#undef POCKETFFT_PARTSTEP7a0
#undef POCKETFFT_PARTSTEP7a
#undef POCKETFFT_PREP7
template <bool fwd, typename T> void ROTX45(T &a)
{
constexpr T0 hsqt2=T0(0.707106781186547524400844362104849L);
if (fwd)
{ auto tmp_=a.r; a.r=hsqt2*(a.r+a.i); a.i=hsqt2*(a.i-tmp_); }
else
{ auto tmp_=a.r; a.r=hsqt2*(a.r-a.i); a.i=hsqt2*(a.i+tmp_); }
}
template <bool fwd, typename T> void ROTX135(T &a)
{
constexpr T0 hsqt2=T0(0.707106781186547524400844362104849L);
if (fwd)
{ auto tmp_=a.r; a.r=hsqt2*(a.i-a.r); a.i=hsqt2*(-tmp_-a.i); }
else
{ auto tmp_=a.r; a.r=hsqt2*(-a.r-a.i); a.i=hsqt2*(tmp_-a.i); }
}
template<typename T> inline void PMINPLACE(T &a, T &b)
{ T t = a; a.r+=b.r; a.i+=b.i; b.r=t.r-b.r; b.i=t.i-b.i; }
template<bool fwd, typename T> void pass8 (size_t ido, size_t l1,
const T * POCKETFFT_RESTRICT cc, T * POCKETFFT_RESTRICT ch,
const cmplx<T0> * POCKETFFT_RESTRICT wa)
{
constexpr size_t cdim=8;
auto CH = [ch,ido,l1](size_t a, size_t b, size_t c) -> T&
{ return ch[a+ido*(b+l1*c)]; };
auto CC = [cc,ido,cdim](size_t a, size_t b, size_t c) -> const T&
{ return cc[a+ido*(b+cdim*c)]; };
auto WA = [wa, ido](size_t x, size_t i)
{ return wa[i-1+x*(ido-1)]; };
if (ido==1)
for (size_t k=0; k<l1; ++k)
{
T a0, a1, a2, a3, a4, a5, a6, a7;
PMC(a0,a4,CC(0,0,k),CC(0,4,k));
PMC(a1,a5,CC(0,1,k),CC(0,5,k));
PMC(a2,a6,CC(0,2,k),CC(0,6,k));
PMC(a3,a7,CC(0,3,k),CC(0,7,k));
ROTX90<fwd>(a6);
ROTX90<fwd>(a7);
PMINPLACE(a0,a2);
PMINPLACE(a1,a3);
PMINPLACE(a4,a6);
PMINPLACE(a5,a7);
ROTX45<fwd>(a5);
ROTX90<fwd>(a3);
ROTX135<fwd>(a7);
PMC(CH(0,k,0),CH(0,k,4),a0,a1);
PMC(CH(0,k,1),CH(0,k,5),a4,a5);
PMC(CH(0,k,2),CH(0,k,6),a2,a3);
PMC(CH(0,k,3),CH(0,k,7),a6,a7);
}
else
for (size_t k=0; k<l1; ++k)
{
T a0, a1, a2, a3, a4, a5, a6, a7;
PMC(a0,a4,CC(0,0,k),CC(0,4,k));
PMC(a1,a5,CC(0,1,k),CC(0,5,k));
PMC(a2,a6,CC(0,2,k),CC(0,6,k));
PMC(a3,a7,CC(0,3,k),CC(0,7,k));
ROTX90<fwd>(a6);
ROTX90<fwd>(a7);
PMINPLACE(a0,a2);
PMINPLACE(a1,a3);
PMINPLACE(a4,a6);
PMINPLACE(a5,a7);
ROTX45<fwd>(a5);
ROTX90<fwd>(a3);
ROTX135<fwd>(a7);
PMC(CH(0,k,0),CH(0,k,4),a0,a1);
PMC(CH(0,k,1),CH(0,k,5),a4,a5);
PMC(CH(0,k,2),CH(0,k,6),a2,a3);
PMC(CH(0,k,3),CH(0,k,7),a6,a7);
for (size_t i=1; i<ido; ++i)
{
T a0, a1, a2, a3, a4, a5, a6, a7;
PMC(a0,a4,CC(i,0,k),CC(i,4,k));
PMC(a1,a5,CC(i,1,k),CC(i,5,k));
PMC(a2,a6,CC(i,2,k),CC(i,6,k));
PMC(a3,a7,CC(i,3,k),CC(i,7,k));
ROTX90<fwd>(a6);
ROTX90<fwd>(a7);
PMINPLACE(a0,a2);
PMINPLACE(a1,a3);
PMINPLACE(a4,a6);
PMINPLACE(a5,a7);
ROTX45<fwd>(a5);
ROTX90<fwd>(a3);
ROTX135<fwd>(a7);
PMINPLACE(a0,a1);
PMINPLACE(a2,a3);
PMINPLACE(a4,a5);
PMINPLACE(a6,a7);
CH(i,k,0) = a0;
CH(i,k,1) = a4.template special_mul<fwd>(WA(0,i));
CH(i,k,2) = a2.template special_mul<fwd>(WA(1,i));
CH(i,k,3) = a6.template special_mul<fwd>(WA(2,i));
CH(i,k,4) = a1.template special_mul<fwd>(WA(3,i));
CH(i,k,5) = a5.template special_mul<fwd>(WA(4,i));
CH(i,k,6) = a3.template special_mul<fwd>(WA(5,i));
CH(i,k,7) = a7.template special_mul<fwd>(WA(6,i));
}
}
}
#define POCKETFFT_PREP11(idx) \
T t1 = CC(idx,0,k), t2, t3, t4, t5, t6, t7, t8, t9, t10, t11; \
PMC (t2,t11,CC(idx,1,k),CC(idx,10,k)); \
PMC (t3,t10,CC(idx,2,k),CC(idx, 9,k)); \
PMC (t4,t9 ,CC(idx,3,k),CC(idx, 8,k)); \
PMC (t5,t8 ,CC(idx,4,k),CC(idx, 7,k)); \
PMC (t6,t7 ,CC(idx,5,k),CC(idx, 6,k)); \
CH(idx,k,0).r=t1.r+t2.r+t3.r+t4.r+t5.r+t6.r; \
CH(idx,k,0).i=t1.i+t2.i+t3.i+t4.i+t5.i+t6.i;
#define POCKETFFT_PARTSTEP11a0(u1,u2,x1,x2,x3,x4,x5,y1,y2,y3,y4,y5,out1,out2) \
{ \
T ca = t1 + t2*x1 + t3*x2 + t4*x3 + t5*x4 +t6*x5, \
cb; \
cb.i=y1*t11.r y2*t10.r y3*t9.r y4*t8.r y5*t7.r; \
cb.r=-(y1*t11.i y2*t10.i y3*t9.i y4*t8.i y5*t7.i ); \
PMC(out1,out2,ca,cb); \
}
#define POCKETFFT_PARTSTEP11a(u1,u2,x1,x2,x3,x4,x5,y1,y2,y3,y4,y5) \
POCKETFFT_PARTSTEP11a0(u1,u2,x1,x2,x3,x4,x5,y1,y2,y3,y4,y5,CH(0,k,u1),CH(0,k,u2))
#define POCKETFFT_PARTSTEP11(u1,u2,x1,x2,x3,x4,x5,y1,y2,y3,y4,y5) \
{ \
T da,db; \
POCKETFFT_PARTSTEP11a0(u1,u2,x1,x2,x3,x4,x5,y1,y2,y3,y4,y5,da,db) \
CH(i,k,u1) = da.template special_mul<fwd>(WA(u1-1,i)); \
CH(i,k,u2) = db.template special_mul<fwd>(WA(u2-1,i)); \
}
template<bool fwd, typename T> void pass11 (size_t ido, size_t l1,
const T * POCKETFFT_RESTRICT cc, T * POCKETFFT_RESTRICT ch,
const cmplx<T0> * POCKETFFT_RESTRICT wa)
{
constexpr size_t cdim=11;
constexpr T0 tw1r= T0(0.8412535328311811688618116489193677L),
tw1i= (fwd ? -1 : 1) * T0(0.5406408174555975821076359543186917L),
tw2r= T0(0.4154150130018864255292741492296232L),
tw2i= (fwd ? -1 : 1) * T0(0.9096319953545183714117153830790285L),
tw3r= T0(-0.1423148382732851404437926686163697L),
tw3i= (fwd ? -1 : 1) * T0(0.9898214418809327323760920377767188L),
tw4r= T0(-0.6548607339452850640569250724662936L),
tw4i= (fwd ? -1 : 1) * T0(0.7557495743542582837740358439723444L),
tw5r= T0(-0.9594929736144973898903680570663277L),
tw5i= (fwd ? -1 : 1) * T0(0.2817325568414296977114179153466169L);
auto CH = [ch,ido,l1](size_t a, size_t b, size_t c) -> T&
{ return ch[a+ido*(b+l1*c)]; };
auto CC = [cc,ido,cdim](size_t a, size_t b, size_t c) -> const T&
{ return cc[a+ido*(b+cdim*c)]; };
auto WA = [wa, ido](size_t x, size_t i)
{ return wa[i-1+x*(ido-1)]; };
if (ido==1)
for (size_t k=0; k<l1; ++k)
{
POCKETFFT_PREP11(0)
POCKETFFT_PARTSTEP11a(1,10,tw1r,tw2r,tw3r,tw4r,tw5r,+tw1i,+tw2i,+tw3i,+tw4i,+tw5i)
POCKETFFT_PARTSTEP11a(2, 9,tw2r,tw4r,tw5r,tw3r,tw1r,+tw2i,+tw4i,-tw5i,-tw3i,-tw1i)
POCKETFFT_PARTSTEP11a(3, 8,tw3r,tw5r,tw2r,tw1r,tw4r,+tw3i,-tw5i,-tw2i,+tw1i,+tw4i)
POCKETFFT_PARTSTEP11a(4, 7,tw4r,tw3r,tw1r,tw5r,tw2r,+tw4i,-tw3i,+tw1i,+tw5i,-tw2i)
POCKETFFT_PARTSTEP11a(5, 6,tw5r,tw1r,tw4r,tw2r,tw3r,+tw5i,-tw1i,+tw4i,-tw2i,+tw3i)
}
else
for (size_t k=0; k<l1; ++k)
{
{
POCKETFFT_PREP11(0)
POCKETFFT_PARTSTEP11a(1,10,tw1r,tw2r,tw3r,tw4r,tw5r,+tw1i,+tw2i,+tw3i,+tw4i,+tw5i)
POCKETFFT_PARTSTEP11a(2, 9,tw2r,tw4r,tw5r,tw3r,tw1r,+tw2i,+tw4i,-tw5i,-tw3i,-tw1i)
POCKETFFT_PARTSTEP11a(3, 8,tw3r,tw5r,tw2r,tw1r,tw4r,+tw3i,-tw5i,-tw2i,+tw1i,+tw4i)
POCKETFFT_PARTSTEP11a(4, 7,tw4r,tw3r,tw1r,tw5r,tw2r,+tw4i,-tw3i,+tw1i,+tw5i,-tw2i)
POCKETFFT_PARTSTEP11a(5, 6,tw5r,tw1r,tw4r,tw2r,tw3r,+tw5i,-tw1i,+tw4i,-tw2i,+tw3i)
}
for (size_t i=1; i<ido; ++i)
{
POCKETFFT_PREP11(i)
POCKETFFT_PARTSTEP11(1,10,tw1r,tw2r,tw3r,tw4r,tw5r,+tw1i,+tw2i,+tw3i,+tw4i,+tw5i)
POCKETFFT_PARTSTEP11(2, 9,tw2r,tw4r,tw5r,tw3r,tw1r,+tw2i,+tw4i,-tw5i,-tw3i,-tw1i)
POCKETFFT_PARTSTEP11(3, 8,tw3r,tw5r,tw2r,tw1r,tw4r,+tw3i,-tw5i,-tw2i,+tw1i,+tw4i)
POCKETFFT_PARTSTEP11(4, 7,tw4r,tw3r,tw1r,tw5r,tw2r,+tw4i,-tw3i,+tw1i,+tw5i,-tw2i)
POCKETFFT_PARTSTEP11(5, 6,tw5r,tw1r,tw4r,tw2r,tw3r,+tw5i,-tw1i,+tw4i,-tw2i,+tw3i)
}
}
}
#undef POCKETFFT_PARTSTEP11
#undef POCKETFFT_PARTSTEP11a0
#undef POCKETFFT_PARTSTEP11a
#undef POCKETFFT_PREP11
template<bool fwd, typename T> void passg (size_t ido, size_t ip,
size_t l1, T * POCKETFFT_RESTRICT cc, T * POCKETFFT_RESTRICT ch,
const cmplx<T0> * POCKETFFT_RESTRICT wa,
const cmplx<T0> * POCKETFFT_RESTRICT csarr)
{
const size_t cdim=ip;
size_t ipph = (ip+1)/2;
size_t idl1 = ido*l1;
auto CH = [ch,ido,l1](size_t a, size_t b, size_t c) -> T&
{ return ch[a+ido*(b+l1*c)]; };
auto CC = [cc,ido,cdim](size_t a, size_t b, size_t c) -> const T&
{ return cc[a+ido*(b+cdim*c)]; };
auto CX = [cc, ido, l1](size_t a, size_t b, size_t c) -> T&
{ return cc[a+ido*(b+l1*c)]; };
auto CX2 = [cc, idl1](size_t a, size_t b) -> T&
{ return cc[a+idl1*b]; };
auto CH2 = [ch, idl1](size_t a, size_t b) -> const T&
{ return ch[a+idl1*b]; };
arr<cmplx<T0>> wal(ip);
wal[0] = cmplx<T0>(1., 0.);
for (size_t i=1; i<ip; ++i)
wal[i]=cmplx<T0>(csarr[i].r,fwd ? -csarr[i].i : csarr[i].i);
for (size_t k=0; k<l1; ++k)
for (size_t i=0; i<ido; ++i)
CH(i,k,0) = CC(i,0,k);
for (size_t j=1, jc=ip-1; j<ipph; ++j, --jc)
for (size_t k=0; k<l1; ++k)
for (size_t i=0; i<ido; ++i)
PMC(CH(i,k,j),CH(i,k,jc),CC(i,j,k),CC(i,jc,k));
for (size_t k=0; k<l1; ++k)
for (size_t i=0; i<ido; ++i)
{
T tmp = CH(i,k,0);
for (size_t j=1; j<ipph; ++j)
tmp+=CH(i,k,j);
CX(i,k,0) = tmp;
}
for (size_t l=1, lc=ip-1; l<ipph; ++l, --lc)
{
// j=0
for (size_t ik=0; ik<idl1; ++ik)
{
CX2(ik,l).r = CH2(ik,0).r+wal[l].r*CH2(ik,1).r+wal[2*l].r*CH2(ik,2).r;
CX2(ik,l).i = CH2(ik,0).i+wal[l].r*CH2(ik,1).i+wal[2*l].r*CH2(ik,2).i;
CX2(ik,lc).r=-wal[l].i*CH2(ik,ip-1).i-wal[2*l].i*CH2(ik,ip-2).i;
CX2(ik,lc).i=wal[l].i*CH2(ik,ip-1).r+wal[2*l].i*CH2(ik,ip-2).r;
}
size_t iwal=2*l;
size_t j=3, jc=ip-3;
for (; j<ipph-1; j+=2, jc-=2)
{
iwal+=l; if (iwal>ip) iwal-=ip;
cmplx<T0> xwal=wal[iwal];
iwal+=l; if (iwal>ip) iwal-=ip;
cmplx<T0> xwal2=wal[iwal];
for (size_t ik=0; ik<idl1; ++ik)
{
CX2(ik,l).r += CH2(ik,j).r*xwal.r+CH2(ik,j+1).r*xwal2.r;
CX2(ik,l).i += CH2(ik,j).i*xwal.r+CH2(ik,j+1).i*xwal2.r;
CX2(ik,lc).r -= CH2(ik,jc).i*xwal.i+CH2(ik,jc-1).i*xwal2.i;
CX2(ik,lc).i += CH2(ik,jc).r*xwal.i+CH2(ik,jc-1).r*xwal2.i;
}
}
for (; j<ipph; ++j, --jc)
{
iwal+=l; if (iwal>ip) iwal-=ip;
cmplx<T0> xwal=wal[iwal];
for (size_t ik=0; ik<idl1; ++ik)
{
CX2(ik,l).r += CH2(ik,j).r*xwal.r;
CX2(ik,l).i += CH2(ik,j).i*xwal.r;
CX2(ik,lc).r -= CH2(ik,jc).i*xwal.i;
CX2(ik,lc).i += CH2(ik,jc).r*xwal.i;
}
}
}
// shuffling and twiddling
if (ido==1)
for (size_t j=1, jc=ip-1; j<ipph; ++j, --jc)
for (size_t ik=0; ik<idl1; ++ik)
{
T t1=CX2(ik,j), t2=CX2(ik,jc);
PMC(CX2(ik,j),CX2(ik,jc),t1,t2);
}
else
{
for (size_t j=1, jc=ip-1; j<ipph; ++j,--jc)
for (size_t k=0; k<l1; ++k)
{
T t1=CX(0,k,j), t2=CX(0,k,jc);
PMC(CX(0,k,j),CX(0,k,jc),t1,t2);
for (size_t i=1; i<ido; ++i)
{
T x1, x2;
PMC(x1,x2,CX(i,k,j),CX(i,k,jc));
size_t idij=(j-1)*(ido-1)+i-1;
CX(i,k,j) = x1.template special_mul<fwd>(wa[idij]);
idij=(jc-1)*(ido-1)+i-1;
CX(i,k,jc) = x2.template special_mul<fwd>(wa[idij]);
}
}
}
}
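// passg is the generic fallback pass for factors not covered by the
// specialized pass2..pass11 routines above: it evaluates the length-ip DFT
// directly from the precomputed roots in csarr, using the l/lc and j/jc
// conjugate pairings so only ipph=(ip+1)/2 half-rows are accumulated, at
// O(ip) work per output element instead of a recursive split.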
template<bool fwd, typename T> void pass_all(T c[], T0 fct)
{
if (length==1) { c[0]*=fct; return; }
size_t l1=1;
arr<T> ch(length);
T *p1=c, *p2=ch.data();
for(size_t k1=0; k1<fact.size(); k1++)
{
size_t ip=fact[k1].fct;
size_t l2=ip*l1;
size_t ido = length/l2;
if (ip==4)
pass4<fwd> (ido, l1, p1, p2, fact[k1].tw);
else if(ip==8)
pass8<fwd>(ido, l1, p1, p2, fact[k1].tw);
else if(ip==2)
pass2<fwd>(ido, l1, p1, p2, fact[k1].tw);
else if(ip==3)
pass3<fwd> (ido, l1, p1, p2, fact[k1].tw);
else if(ip==5)
pass5<fwd> (ido, l1, p1, p2, fact[k1].tw);
else if(ip==7)
pass7<fwd> (ido, l1, p1, p2, fact[k1].tw);
else if(ip==11)
pass11<fwd> (ido, l1, p1, p2, fact[k1].tw);
else
{
passg<fwd>(ido, ip, l1, p1, p2, fact[k1].tw, fact[k1].tws);
swap(p1,p2); // passg leaves its result in p1; pre-swap so the swap below restores it
}
swap(p1,p2);
l1=l2;
}
if (p1!=c)
{
if (fct!=1.)
for (size_t i=0; i<length; ++i)
c[i] = ch[i]*fct;
else
memcpy (c,p1,length*sizeof(T));
}
else
if (fct!=1.)
for (size_t i=0; i<length; ++i)
c[i] *= fct;
}
public:
template<typename T> void forward(T c[], T0 fct)
{ pass_all<true>(c, fct); }
template<typename T> void backward(T c[], T0 fct)
{ pass_all<false>(c, fct); }
private:
POCKETFFT_NOINLINE void factorize()
{
size_t len=length;
while ((len&7)==0)
{ add_factor(8); len>>=3; }
while ((len&3)==0)
{ add_factor(4); len>>=2; }
if ((len&1)==0)
{
len>>=1;
// factor 2 should be at the front of the factor list
add_factor(2);
swap(fact[0].fct, fact.back().fct);
}
for (size_t divisor=3; divisor*divisor<=len; divisor+=2)
while ((len%divisor)==0)
{
add_factor(divisor);
len/=divisor;
}
if (len>1) add_factor(len);
}
size_t twsize() const
{
size_t twsize=0, l1=1;
for (size_t k=0; k<fact.size(); ++k)
{
size_t ip=fact[k].fct, ido= length/(l1*ip);
twsize+=(ip-1)*(ido-1);
if (ip>11)
twsize+=ip;
l1*=ip;
}
return twsize;
}
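// Continuing the length-60 example with factors {4, 3, 5}: ip=4/ido=15
// contributes 3*14=42 twiddles, ip=3/ido=5 contributes 2*4=8, ip=5/ido=1
// contributes 0, and no factor exceeds 11, so 50 entries are allocated.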
void comp_twiddle()
{
sincos_2pibyn<T0> twid(length, false);
auto twiddle = twid.cdata();
size_t l1=1;
size_t memofs=0;
for (size_t k=0; k<fact.size(); ++k)
{
size_t ip=fact[k].fct, ido=length/(l1*ip);
fact[k].tw=mem.data()+memofs;
memofs+=(ip-1)*(ido-1);
for (size_t j=1; j<ip; ++j)
for (size_t i=1; i<ido; ++i)
fact[k].tw[(j-1)*(ido-1)+i-1] = twiddle[j*l1*i];
if (ip>11)
{
fact[k].tws=mem.data()+memofs;
memofs+=ip;
for (size_t j=0; j<ip; ++j)
fact[k].tws[j] = twiddle[j*l1*ido];
}
l1*=ip;
}
}
public:
POCKETFFT_NOINLINE cfftp(size_t length_)
: length(length_)
{
if (length==0) throw runtime_error("zero-length FFT requested");
if (length==1) return;
factorize();
mem.resize(twsize());
comp_twiddle();
}
};
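// A minimal usage sketch for the class above (hypothetical, not part of the
// library; cfftp is an internal building block normally reached through
// pocketfft_c further down):
//
//   cfftp<double> plan(60);              // factorizes as {4, 3, 5}
//   vector<cmplx<double>> buf(60);       // cmplx<> is defined earlier in this header
//   plan.forward(buf.data(), 1.);        // unnormalized forward transform
//   plan.backward(buf.data(), 1./60);    // scaled inverse recovers the input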
//
// real-valued FFTPACK transforms
//
template<typename T0> class rfftp
{
private:
struct fctdata
{
size_t fct;
T0 *tw, *tws;
};
size_t length;
arr<T0> mem;
vector<fctdata> fact;
void add_factor(size_t factor)
{ fact.push_back({factor, nullptr, nullptr}); }
template<typename T> inline void PM(T &a, T &b, T c, T d)
{ a=c+d; b=c-d; }
/* (a+ib) = conj(c+id) * (e+if) */
template<typename T1, typename T2, typename T3> inline void MULPM
(T1 &a, T1 &b, T2 c, T2 d, T3 e, T3 f)
{ a=c*e+d*f; b=c*f-d*e; }
template<typename T> void radf2 (size_t ido, size_t l1,
const T * POCKETFFT_RESTRICT cc, T * POCKETFFT_RESTRICT ch,
const T0 * POCKETFFT_RESTRICT wa)
{
constexpr size_t cdim=2;
auto WA = [wa,ido](size_t x, size_t i) { return wa[i+x*(ido-1)]; };
auto CC = [cc,ido,l1](size_t a, size_t b, size_t c) -> const T&
{ return cc[a+ido*(b+l1*c)]; };
auto CH = [ch,ido,cdim](size_t a, size_t b, size_t c) -> T&
{ return ch[a+ido*(b+cdim*c)]; };
for (size_t k=0; k<l1; k++)
PM (CH(0,0,k),CH(ido-1,1,k),CC(0,k,0),CC(0,k,1));
if ((ido&1)==0)
for (size_t k=0; k<l1; k++)
{
CH( 0,1,k) = -CC(ido-1,k,1);
CH(ido-1,0,k) = CC(ido-1,k,0);
}
if (ido<=2) return;
for (size_t k=0; k<l1; k++)
for (size_t i=2; i<ido; i+=2)
{
size_t ic=ido-i;
T tr2, ti2;
MULPM (tr2,ti2,WA(0,i-2),WA(0,i-1),CC(i-1,k,1),CC(i,k,1));
PM (CH(i-1,0,k),CH(ic-1,1,k),CC(i-1,k,0),tr2);
PM (CH(i ,0,k),CH(ic ,1,k),ti2,CC(i ,k,0));
}
}
// a2=a+b; b2=i*(b-a);
#define POCKETFFT_REARRANGE(rx, ix, ry, iy) \
{\
auto t1=rx+ry, t2=ry-rx, t3=ix+iy, t4=ix-iy; \
rx=t1; ix=t3; ry=t4; iy=t2; \
}
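// Reading (rx,ix) and (ry,iy) as complex numbers a and b, the macro above
// implements a <- a+b and b <- i*(b-a): i*(b-a) = i*((ry-rx)+i*(iy-ix))
// = (ix-iy) + i*(ry-rx), which is exactly the new (ry,iy) pair. This folds
// part of the radix-3/5 butterflies into the rearrangement.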
template<typename T> void radf3(size_t ido, size_t l1,
const T * POCKETFFT_RESTRICT cc, T * POCKETFFT_RESTRICT ch,
const T0 * POCKETFFT_RESTRICT wa)
{
constexpr size_t cdim=3;
constexpr T0 taur=-0.5, taui=T0(0.8660254037844386467637231707529362L);
auto WA = [wa,ido](size_t x, size_t i) { return wa[i+x*(ido-1)]; };
auto CC = [cc,ido,l1](size_t a, size_t b, size_t c) -> const T&
{ return cc[a+ido*(b+l1*c)]; };
auto CH = [ch,ido,cdim](size_t a, size_t b, size_t c) -> T&
{ return ch[a+ido*(b+cdim*c)]; };
for (size_t k=0; k<l1; k++)
{
T cr2=CC(0,k,1)+CC(0,k,2);
CH(0,0,k) = CC(0,k,0)+cr2;
CH(0,2,k) = taui*(CC(0,k,2)-CC(0,k,1));
CH(ido-1,1,k) = CC(0,k,0)+taur*cr2;
}
if (ido==1) return;
for (size_t k=0; k<l1; k++)
for (size_t i=2; i<ido; i+=2)
{
size_t ic=ido-i;
T di2, di3, dr2, dr3;
MULPM (dr2,di2,WA(0,i-2),WA(0,i-1),CC(i-1,k,1),CC(i,k,1)); // d2=conj(WA0)*CC1
MULPM (dr3,di3,WA(1,i-2),WA(1,i-1),CC(i-1,k,2),CC(i,k,2)); // d3=conj(WA1)*CC2
POCKETFFT_REARRANGE(dr2, di2, dr3, di3);
CH(i-1,0,k) = CC(i-1,k,0)+dr2; // c add
CH(i ,0,k) = CC(i ,k,0)+di2;
T tr2 = CC(i-1,k,0)+taur*dr2; // c add
T ti2 = CC(i ,k,0)+taur*di2;
T tr3 = taui*dr3; // t3 = taui*i*(d3-d2)?
T ti3 = taui*di3;
PM(CH(i-1,2,k),CH(ic-1,1,k),tr2,tr3); // PM(i) = t2+t3
PM(CH(i ,2,k),CH(ic ,1,k),ti3,ti2); // PM(ic) = conj(t2-t3)
}
}
template<typename T> void radf4(size_t ido, size_t l1,
const T * POCKETFFT_RESTRICT cc, T * POCKETFFT_RESTRICT ch,
const T0 * POCKETFFT_RESTRICT wa)
{
constexpr size_t cdim=4;
constexpr T0 hsqt2=T0(0.707106781186547524400844362104849L);
auto WA = [wa,ido](size_t x, size_t i) { return wa[i+x*(ido-1)]; };
auto CC = [cc,ido,l1](size_t a, size_t b, size_t c) -> const T&
{ return cc[a+ido*(b+l1*c)]; };
auto CH = [ch,ido,cdim](size_t a, size_t b, size_t c) -> T&
{ return ch[a+ido*(b+cdim*c)]; };
for (size_t k=0; k<l1; k++)
{
T tr1,tr2;
PM (tr1,CH(0,2,k),CC(0,k,3),CC(0,k,1));
PM (tr2,CH(ido-1,1,k),CC(0,k,0),CC(0,k,2));
PM (CH(0,0,k),CH(ido-1,3,k),tr2,tr1);
}
if ((ido&1)==0)
for (size_t k=0; k<l1; k++)
{
T ti1=-hsqt2*(CC(ido-1,k,1)+CC(ido-1,k,3));
T tr1= hsqt2*(CC(ido-1,k,1)-CC(ido-1,k,3));
PM (CH(ido-1,0,k),CH(ido-1,2,k),CC(ido-1,k,0),tr1);
PM (CH( 0,3,k),CH( 0,1,k),ti1,CC(ido-1,k,2));
}
if (ido<=2) return;
for (size_t k=0; k<l1; k++)
for (size_t i=2; i<ido; i+=2)
{
size_t ic=ido-i;
T ci2, ci3, ci4, cr2, cr3, cr4, ti1, ti2, ti3, ti4, tr1, tr2, tr3, tr4;
MULPM(cr2,ci2,WA(0,i-2),WA(0,i-1),CC(i-1,k,1),CC(i,k,1));
MULPM(cr3,ci3,WA(1,i-2),WA(1,i-1),CC(i-1,k,2),CC(i,k,2));
MULPM(cr4,ci4,WA(2,i-2),WA(2,i-1),CC(i-1,k,3),CC(i,k,3));
PM(tr1,tr4,cr4,cr2);
PM(ti1,ti4,ci2,ci4);
PM(tr2,tr3,CC(i-1,k,0),cr3);
PM(ti2,ti3,CC(i ,k,0),ci3);
PM(CH(i-1,0,k),CH(ic-1,3,k),tr2,tr1);
PM(CH(i ,0,k),CH(ic ,3,k),ti1,ti2);
PM(CH(i-1,2,k),CH(ic-1,1,k),tr3,ti4);
PM(CH(i ,2,k),CH(ic ,1,k),tr4,ti3);
}
}
template<typename T> void radf5(size_t ido, size_t l1,
const T * POCKETFFT_RESTRICT cc, T * POCKETFFT_RESTRICT ch,
const T0 * POCKETFFT_RESTRICT wa)
{
constexpr size_t cdim=5;
constexpr T0 tr11= T0(0.3090169943749474241022934171828191L),
ti11= T0(0.9510565162951535721164393333793821L),
tr12= T0(-0.8090169943749474241022934171828191L),
ti12= T0(0.5877852522924731291687059546390728L);
auto WA = [wa,ido](size_t x, size_t i) { return wa[i+x*(ido-1)]; };
auto CC = [cc,ido,l1](size_t a, size_t b, size_t c) -> const T&
{ return cc[a+ido*(b+l1*c)]; };
auto CH = [ch,ido,cdim](size_t a, size_t b, size_t c) -> T&
{ return ch[a+ido*(b+cdim*c)]; };
for (size_t k=0; k<l1; k++)
{
T cr2, cr3, ci4, ci5;
PM (cr2,ci5,CC(0,k,4),CC(0,k,1));
PM (cr3,ci4,CC(0,k,3),CC(0,k,2));
CH(0,0,k)=CC(0,k,0)+cr2+cr3;
CH(ido-1,1,k)=CC(0,k,0)+tr11*cr2+tr12*cr3;
CH(0,2,k)=ti11*ci5+ti12*ci4;
CH(ido-1,3,k)=CC(0,k,0)+tr12*cr2+tr11*cr3;
CH(0,4,k)=ti12*ci5-ti11*ci4;
}
if (ido==1) return;
for (size_t k=0; k<l1;++k)
for (size_t i=2, ic=ido-2; i<ido; i+=2, ic-=2)
{
T di2, di3, di4, di5, dr2, dr3, dr4, dr5;
MULPM (dr2,di2,WA(0,i-2),WA(0,i-1),CC(i-1,k,1),CC(i,k,1));
MULPM (dr3,di3,WA(1,i-2),WA(1,i-1),CC(i-1,k,2),CC(i,k,2));
MULPM (dr4,di4,WA(2,i-2),WA(2,i-1),CC(i-1,k,3),CC(i,k,3));
MULPM (dr5,di5,WA(3,i-2),WA(3,i-1),CC(i-1,k,4),CC(i,k,4));
POCKETFFT_REARRANGE(dr2, di2, dr5, di5);
POCKETFFT_REARRANGE(dr3, di3, dr4, di4);
CH(i-1,0,k)=CC(i-1,k,0)+dr2+dr3;
CH(i ,0,k)=CC(i ,k,0)+di2+di3;
T tr2=CC(i-1,k,0)+tr11*dr2+tr12*dr3;
T ti2=CC(i ,k,0)+tr11*di2+tr12*di3;
T tr3=CC(i-1,k,0)+tr12*dr2+tr11*dr3;
T ti3=CC(i ,k,0)+tr12*di2+tr11*di3;
T tr5 = ti11*dr5 + ti12*dr4;
T ti5 = ti11*di5 + ti12*di4;
T tr4 = ti12*dr5 - ti11*dr4;
T ti4 = ti12*di5 - ti11*di4;
PM(CH(i-1,2,k),CH(ic-1,1,k),tr2,tr5);
PM(CH(i ,2,k),CH(ic ,1,k),ti5,ti2);
PM(CH(i-1,4,k),CH(ic-1,3,k),tr3,tr4);
PM(CH(i ,4,k),CH(ic ,3,k),ti4,ti3);
}
}
#undef POCKETFFT_REARRANGE
template<typename T> void radfg(size_t ido, size_t ip, size_t l1,
T * POCKETFFT_RESTRICT cc, T * POCKETFFT_RESTRICT ch,
const T0 * POCKETFFT_RESTRICT wa, const T0 * POCKETFFT_RESTRICT csarr)
{
const size_t cdim=ip;
size_t ipph=(ip+1)/2;
size_t idl1 = ido*l1;
auto CC = [cc,ido,cdim](size_t a, size_t b, size_t c) -> T&
{ return cc[a+ido*(b+cdim*c)]; };
auto CH = [ch,ido,l1](size_t a, size_t b, size_t c) -> const T&
{ return ch[a+ido*(b+l1*c)]; };
auto C1 = [cc,ido,l1] (size_t a, size_t b, size_t c) -> T&
{ return cc[a+ido*(b+l1*c)]; };
auto C2 = [cc,idl1] (size_t a, size_t b) -> T&
{ return cc[a+idl1*b]; };
auto CH2 = [ch,idl1] (size_t a, size_t b) -> T&
{ return ch[a+idl1*b]; };
if (ido>1)
{
for (size_t j=1, jc=ip-1; j<ipph; ++j,--jc) // 114
{
size_t is=(j-1)*(ido-1),
is2=(jc-1)*(ido-1);
for (size_t k=0; k<l1; ++k) // 113
{
size_t idij=is;
size_t idij2=is2;
for (size_t i=1; i<=ido-2; i+=2) // 112
{
T t1=C1(i,k,j ), t2=C1(i+1,k,j ),
t3=C1(i,k,jc), t4=C1(i+1,k,jc);
T x1=wa[idij]*t1 + wa[idij+1]*t2,
x2=wa[idij]*t2 - wa[idij+1]*t1,
x3=wa[idij2]*t3 + wa[idij2+1]*t4,
x4=wa[idij2]*t4 - wa[idij2+1]*t3;
C1(i ,k,j ) = x1+x3;
C1(i ,k,jc) = x2-x4;
C1(i+1,k,j ) = x2+x4;
C1(i+1,k,jc) = x3-x1;
idij+=2;
idij2+=2;
}
}
}
}
for (size_t j=1, jc=ip-1; j<ipph; ++j,--jc) // 123
for (size_t k=0; k<l1; ++k) // 122
{
T t1=C1(0,k,j), t2=C1(0,k,jc);
C1(0,k,j ) = t1+t2;
C1(0,k,jc) = t2-t1;
}
// everything in C at this point
//memset(ch,0,ip*l1*ido*sizeof(double));
for (size_t l=1,lc=ip-1; l<ipph; ++l,--lc) // 127
{
for (size_t ik=0; ik<idl1; ++ik) // 124
{
CH2(ik,l ) = C2(ik,0)+csarr[2*l]*C2(ik,1)+csarr[4*l]*C2(ik,2);
CH2(ik,lc) = csarr[2*l+1]*C2(ik,ip-1)+csarr[4*l+1]*C2(ik,ip-2);
}
size_t iang = 2*l;
size_t j=3, jc=ip-3;
for (; j<ipph-3; j+=4,jc-=4) // 126
{
iang+=l; if (iang>=ip) iang-=ip;
T0 ar1=csarr[2*iang], ai1=csarr[2*iang+1];
iang+=l; if (iang>=ip) iang-=ip;
T0 ar2=csarr[2*iang], ai2=csarr[2*iang+1];
iang+=l; if (iang>=ip) iang-=ip;
T0 ar3=csarr[2*iang], ai3=csarr[2*iang+1];
iang+=l; if (iang>=ip) iang-=ip;
T0 ar4=csarr[2*iang], ai4=csarr[2*iang+1];
for (size_t ik=0; ik<idl1; ++ik) // 125
{
CH2(ik,l ) += ar1*C2(ik,j )+ar2*C2(ik,j +1)
+ar3*C2(ik,j +2)+ar4*C2(ik,j +3);
CH2(ik,lc) += ai1*C2(ik,jc)+ai2*C2(ik,jc-1)
+ai3*C2(ik,jc-2)+ai4*C2(ik,jc-3);
}
}
for (; j<ipph-1; j+=2,jc-=2) // 126
{
iang+=l; if (iang>=ip) iang-=ip;
T0 ar1=csarr[2*iang], ai1=csarr[2*iang+1];
iang+=l; if (iang>=ip) iang-=ip;
T0 ar2=csarr[2*iang], ai2=csarr[2*iang+1];
for (size_t ik=0; ik<idl1; ++ik) // 125
{
CH2(ik,l ) += ar1*C2(ik,j )+ar2*C2(ik,j +1);
CH2(ik,lc) += ai1*C2(ik,jc)+ai2*C2(ik,jc-1);
}
}
for (; j<ipph; ++j,--jc) // 126
{
iang+=l; if (iang>=ip) iang-=ip;
T0 ar=csarr[2*iang], ai=csarr[2*iang+1];
for (size_t ik=0; ik<idl1; ++ik) // 125
{
CH2(ik,l ) += ar*C2(ik,j );
CH2(ik,lc) += ai*C2(ik,jc);
}
}
}
for (size_t ik=0; ik<idl1; ++ik) // 101
CH2(ik,0) = C2(ik,0);
for (size_t j=1; j<ipph; ++j) // 129
for (size_t ik=0; ik<idl1; ++ik) // 128
CH2(ik,0) += C2(ik,j);
// everything in CH at this point!
//memset(cc,0,ip*l1*ido*sizeof(double));
for (size_t k=0; k<l1; ++k) // 131
for (size_t i=0; i<ido; ++i) // 130
CC(i,0,k) = CH(i,k,0);
for (size_t j=1, jc=ip-1; j<ipph; ++j,--jc) // 137
{
size_t j2=2*j-1;
for (size_t k=0; k<l1; ++k) // 136
{
CC(ido-1,j2,k) = CH(0,k,j);
CC(0,j2+1,k) = CH(0,k,jc);
}
}
if (ido==1) return;
for (size_t j=1, jc=ip-1; j<ipph; ++j,--jc) // 140
{
size_t j2=2*j-1;
for(size_t k=0; k<l1; ++k) // 139
for(size_t i=1, ic=ido-i-2; i<=ido-2; i+=2, ic-=2) // 138
{
CC(i ,j2+1,k) = CH(i ,k,j )+CH(i ,k,jc);
CC(ic ,j2 ,k) = CH(i ,k,j )-CH(i ,k,jc);
CC(i+1 ,j2+1,k) = CH(i+1,k,j )+CH(i+1,k,jc);
CC(ic+1,j2 ,k) = CH(i+1,k,jc)-CH(i+1,k,j );
}
}
}
template<typename T> void radb2(size_t ido, size_t l1,
const T * POCKETFFT_RESTRICT cc, T * POCKETFFT_RESTRICT ch,
const T0 * POCKETFFT_RESTRICT wa)
{
constexpr size_t cdim=2;
auto WA = [wa,ido](size_t x, size_t i) { return wa[i+x*(ido-1)]; };
auto CC = [cc,ido,cdim](size_t a, size_t b, size_t c) -> const T&
{ return cc[a+ido*(b+cdim*c)]; };
auto CH = [ch,ido,l1](size_t a, size_t b, size_t c) -> T&
{ return ch[a+ido*(b+l1*c)]; };
for (size_t k=0; k<l1; k++)
PM (CH(0,k,0),CH(0,k,1),CC(0,0,k),CC(ido-1,1,k));
if ((ido&1)==0)
for (size_t k=0; k<l1; k++)
{
CH(ido-1,k,0) = 2*CC(ido-1,0,k);
CH(ido-1,k,1) =-2*CC(0 ,1,k);
}
if (ido<=2) return;
for (size_t k=0; k<l1;++k)
for (size_t i=2; i<ido; i+=2)
{
size_t ic=ido-i;
T ti2, tr2;
PM (CH(i-1,k,0),tr2,CC(i-1,0,k),CC(ic-1,1,k));
PM (ti2,CH(i ,k,0),CC(i ,0,k),CC(ic ,1,k));
MULPM (CH(i,k,1),CH(i-1,k,1),WA(0,i-2),WA(0,i-1),ti2,tr2);
}
}
template<typename T> void radb3(size_t ido, size_t l1,
const T * POCKETFFT_RESTRICT cc, T * POCKETFFT_RESTRICT ch,
const T0 * POCKETFFT_RESTRICT wa)
{
constexpr size_t cdim=3;
constexpr T0 taur=-0.5, taui=T0(0.8660254037844386467637231707529362L);
auto WA = [wa,ido](size_t x, size_t i) { return wa[i+x*(ido-1)]; };
auto CC = [cc,ido,cdim](size_t a, size_t b, size_t c) -> const T&
{ return cc[a+ido*(b+cdim*c)]; };
auto CH = [ch,ido,l1](size_t a, size_t b, size_t c) -> T&
{ return ch[a+ido*(b+l1*c)]; };
for (size_t k=0; k<l1; k++)
{
T tr2=2*CC(ido-1,1,k);
T cr2=CC(0,0,k)+taur*tr2;
CH(0,k,0)=CC(0,0,k)+tr2;
T ci3=2*taui*CC(0,2,k);
PM (CH(0,k,2),CH(0,k,1),cr2,ci3);
}
if (ido==1) return;
for (size_t k=0; k<l1; k++)
for (size_t i=2, ic=ido-2; i<ido; i+=2, ic-=2)
{
T tr2=CC(i-1,2,k)+CC(ic-1,1,k); // t2=CC(I) + conj(CC(ic))
T ti2=CC(i ,2,k)-CC(ic ,1,k);
T cr2=CC(i-1,0,k)+taur*tr2; // c2=CC +taur*t2
T ci2=CC(i ,0,k)+taur*ti2;
CH(i-1,k,0)=CC(i-1,0,k)+tr2; // CH=CC+t2
CH(i ,k,0)=CC(i ,0,k)+ti2;
T cr3=taui*(CC(i-1,2,k)-CC(ic-1,1,k));// c3=taui*(CC(i)-conj(CC(ic)))
T ci3=taui*(CC(i ,2,k)+CC(ic ,1,k));
T di2, di3, dr2, dr3;
PM(dr3,dr2,cr2,ci3); // d2= (cr2-ci3, ci2+cr3) = c2+i*c3
PM(di2,di3,ci2,cr3); // d3= (cr2+ci3, ci2-cr3) = c2-i*c3
MULPM(CH(i,k,1),CH(i-1,k,1),WA(0,i-2),WA(0,i-1),di2,dr2); // ch = WA*d2
MULPM(CH(i,k,2),CH(i-1,k,2),WA(1,i-2),WA(1,i-1),di3,dr3);
}
}
template<typename T> void radb4(size_t ido, size_t l1,
const T * POCKETFFT_RESTRICT cc, T * POCKETFFT_RESTRICT ch,
const T0 * POCKETFFT_RESTRICT wa)
{
constexpr size_t cdim=4;
constexpr T0 sqrt2=T0(1.414213562373095048801688724209698L);
auto WA = [wa,ido](size_t x, size_t i) { return wa[i+x*(ido-1)]; };
auto CC = [cc,ido,cdim](size_t a, size_t b, size_t c) -> const T&
{ return cc[a+ido*(b+cdim*c)]; };
auto CH = [ch,ido,l1](size_t a, size_t b, size_t c) -> T&
{ return ch[a+ido*(b+l1*c)]; };
for (size_t k=0; k<l1; k++)
{
T tr1, tr2;
PM (tr2,tr1,CC(0,0,k),CC(ido-1,3,k));
T tr3=2*CC(ido-1,1,k);
T tr4=2*CC(0,2,k);
PM (CH(0,k,0),CH(0,k,2),tr2,tr3);
PM (CH(0,k,3),CH(0,k,1),tr1,tr4);
}
if ((ido&1)==0)
for (size_t k=0; k<l1; k++)
{
T tr1,tr2,ti1,ti2;
PM (ti1,ti2,CC(0 ,3,k),CC(0 ,1,k));
PM (tr2,tr1,CC(ido-1,0,k),CC(ido-1,2,k));
CH(ido-1,k,0)=tr2+tr2;
CH(ido-1,k,1)=sqrt2*(tr1-ti1);
CH(ido-1,k,2)=ti2+ti2;
CH(ido-1,k,3)=-sqrt2*(tr1+ti1);
}
if (ido<=2) return;
for (size_t k=0; k<l1;++k)
for (size_t i=2; i<ido; i+=2)
{
T ci2, ci3, ci4, cr2, cr3, cr4, ti1, ti2, ti3, ti4, tr1, tr2, tr3, tr4;
size_t ic=ido-i;
PM (tr2,tr1,CC(i-1,0,k),CC(ic-1,3,k));
PM (ti1,ti2,CC(i ,0,k),CC(ic ,3,k));
PM (tr4,ti3,CC(i ,2,k),CC(ic ,1,k));
PM (tr3,ti4,CC(i-1,2,k),CC(ic-1,1,k));
PM (CH(i-1,k,0),cr3,tr2,tr3);
PM (CH(i ,k,0),ci3,ti2,ti3);
PM (cr4,cr2,tr1,tr4);
PM (ci2,ci4,ti1,ti4);
MULPM (CH(i,k,1),CH(i-1,k,1),WA(0,i-2),WA(0,i-1),ci2,cr2);
MULPM (CH(i,k,2),CH(i-1,k,2),WA(1,i-2),WA(1,i-1),ci3,cr3);
MULPM (CH(i,k,3),CH(i-1,k,3),WA(2,i-2),WA(2,i-1),ci4,cr4);
}
}
template<typename T> void radb5(size_t ido, size_t l1,
const T * POCKETFFT_RESTRICT cc, T * POCKETFFT_RESTRICT ch,
const T0 * POCKETFFT_RESTRICT wa)
{
constexpr size_t cdim=5;
constexpr T0 tr11= T0(0.3090169943749474241022934171828191L),
ti11= T0(0.9510565162951535721164393333793821L),
tr12= T0(-0.8090169943749474241022934171828191L),
ti12= T0(0.5877852522924731291687059546390728L);
auto WA = [wa,ido](size_t x, size_t i) { return wa[i+x*(ido-1)]; };
auto CC = [cc,ido,cdim](size_t a, size_t b, size_t c) -> const T&
{ return cc[a+ido*(b+cdim*c)]; };
auto CH = [ch,ido,l1](size_t a, size_t b, size_t c) -> T&
{ return ch[a+ido*(b+l1*c)]; };
for (size_t k=0; k<l1; k++)
{
T ti5=CC(0,2,k)+CC(0,2,k);
T ti4=CC(0,4,k)+CC(0,4,k);
T tr2=CC(ido-1,1,k)+CC(ido-1,1,k);
T tr3=CC(ido-1,3,k)+CC(ido-1,3,k);
CH(0,k,0)=CC(0,0,k)+tr2+tr3;
T cr2=CC(0,0,k)+tr11*tr2+tr12*tr3;
T cr3=CC(0,0,k)+tr12*tr2+tr11*tr3;
T ci4, ci5;
MULPM(ci5,ci4,ti5,ti4,ti11,ti12);
PM(CH(0,k,4),CH(0,k,1),cr2,ci5);
PM(CH(0,k,3),CH(0,k,2),cr3,ci4);
}
if (ido==1) return;
for (size_t k=0; k<l1;++k)
for (size_t i=2, ic=ido-2; i<ido; i+=2, ic-=2)
{
T tr2, tr3, tr4, tr5, ti2, ti3, ti4, ti5;
PM(tr2,tr5,CC(i-1,2,k),CC(ic-1,1,k));
PM(ti5,ti2,CC(i ,2,k),CC(ic ,1,k));
PM(tr3,tr4,CC(i-1,4,k),CC(ic-1,3,k));
PM(ti4,ti3,CC(i ,4,k),CC(ic ,3,k));
CH(i-1,k,0)=CC(i-1,0,k)+tr2+tr3;
CH(i ,k,0)=CC(i ,0,k)+ti2+ti3;
T cr2=CC(i-1,0,k)+tr11*tr2+tr12*tr3;
T ci2=CC(i ,0,k)+tr11*ti2+tr12*ti3;
T cr3=CC(i-1,0,k)+tr12*tr2+tr11*tr3;
T ci3=CC(i ,0,k)+tr12*ti2+tr11*ti3;
T ci4, ci5, cr5, cr4;
MULPM(cr5,cr4,tr5,tr4,ti11,ti12);
MULPM(ci5,ci4,ti5,ti4,ti11,ti12);
T dr2, dr3, dr4, dr5, di2, di3, di4, di5;
PM(dr4,dr3,cr3,ci4);
PM(di3,di4,ci3,cr4);
PM(dr5,dr2,cr2,ci5);
PM(di2,di5,ci2,cr5);
MULPM(CH(i,k,1),CH(i-1,k,1),WA(0,i-2),WA(0,i-1),di2,dr2);
MULPM(CH(i,k,2),CH(i-1,k,2),WA(1,i-2),WA(1,i-1),di3,dr3);
MULPM(CH(i,k,3),CH(i-1,k,3),WA(2,i-2),WA(2,i-1),di4,dr4);
MULPM(CH(i,k,4),CH(i-1,k,4),WA(3,i-2),WA(3,i-1),di5,dr5);
}
}
template<typename T> void radbg(size_t ido, size_t ip, size_t l1,
T * POCKETFFT_RESTRICT cc, T * POCKETFFT_RESTRICT ch,
const T0 * POCKETFFT_RESTRICT wa, const T0 * POCKETFFT_RESTRICT csarr)
{
const size_t cdim=ip;
size_t ipph=(ip+1)/2;
size_t idl1 = ido*l1;
auto CC = [cc,ido,cdim](size_t a, size_t b, size_t c) -> const T&
{ return cc[a+ido*(b+cdim*c)]; };
auto CH = [ch,ido,l1](size_t a, size_t b, size_t c) -> T&
{ return ch[a+ido*(b+l1*c)]; };
auto C1 = [cc,ido,l1](size_t a, size_t b, size_t c) -> const T&
{ return cc[a+ido*(b+l1*c)]; };
auto C2 = [cc,idl1](size_t a, size_t b) -> T&
{ return cc[a+idl1*b]; };
auto CH2 = [ch,idl1](size_t a, size_t b) -> T&
{ return ch[a+idl1*b]; };
for (size_t k=0; k<l1; ++k) // 102
for (size_t i=0; i<ido; ++i) // 101
CH(i,k,0) = CC(i,0,k);
for (size_t j=1, jc=ip-1; j<ipph; ++j, --jc) // 108
{
size_t j2=2*j-1;
for (size_t k=0; k<l1; ++k)
{
CH(0,k,j ) = 2*CC(ido-1,j2,k);
CH(0,k,jc) = 2*CC(0,j2+1,k);
}
}
if (ido!=1)
{
for (size_t j=1, jc=ip-1; j<ipph; ++j,--jc) // 111
{
size_t j2=2*j-1;
for (size_t k=0; k<l1; ++k)
for (size_t i=1, ic=ido-i-2; i<=ido-2; i+=2, ic-=2) // 109
{
CH(i ,k,j ) = CC(i ,j2+1,k)+CC(ic ,j2,k);
CH(i ,k,jc) = CC(i ,j2+1,k)-CC(ic ,j2,k);
CH(i+1,k,j ) = CC(i+1,j2+1,k)-CC(ic+1,j2,k);
CH(i+1,k,jc) = CC(i+1,j2+1,k)+CC(ic+1,j2,k);
}
}
}
for (size_t l=1,lc=ip-1; l<ipph; ++l,--lc)
{
for (size_t ik=0; ik<idl1; ++ik)
{
C2(ik,l ) = CH2(ik,0)+csarr[2*l]*CH2(ik,1)+csarr[4*l]*CH2(ik,2);
C2(ik,lc) = csarr[2*l+1]*CH2(ik,ip-1)+csarr[4*l+1]*CH2(ik,ip-2);
}
size_t iang=2*l;
size_t j=3,jc=ip-3;
for(; j<ipph-3; j+=4,jc-=4)
{
iang+=l; if(iang>ip) iang-=ip;
T0 ar1=csarr[2*iang], ai1=csarr[2*iang+1];
iang+=l; if(iang>ip) iang-=ip;
T0 ar2=csarr[2*iang], ai2=csarr[2*iang+1];
iang+=l; if(iang>ip) iang-=ip;
T0 ar3=csarr[2*iang], ai3=csarr[2*iang+1];
iang+=l; if(iang>ip) iang-=ip;
T0 ar4=csarr[2*iang], ai4=csarr[2*iang+1];
for (size_t ik=0; ik<idl1; ++ik)
{
C2(ik,l ) += ar1*CH2(ik,j )+ar2*CH2(ik,j +1)
+ar3*CH2(ik,j +2)+ar4*CH2(ik,j +3);
C2(ik,lc) += ai1*CH2(ik,jc)+ai2*CH2(ik,jc-1)
+ai3*CH2(ik,jc-2)+ai4*CH2(ik,jc-3);
}
}
for(; j<ipph-1; j+=2,jc-=2)
{
iang+=l; if(iang>ip) iang-=ip;
T0 ar1=csarr[2*iang], ai1=csarr[2*iang+1];
iang+=l; if(iang>ip) iang-=ip;
T0 ar2=csarr[2*iang], ai2=csarr[2*iang+1];
for (size_t ik=0; ik<idl1; ++ik)
{
C2(ik,l ) += ar1*CH2(ik,j )+ar2*CH2(ik,j +1);
C2(ik,lc) += ai1*CH2(ik,jc)+ai2*CH2(ik,jc-1);
}
}
for(; j<ipph; ++j,--jc)
{
iang+=l; if(iang>ip) iang-=ip;
T0 war=csarr[2*iang], wai=csarr[2*iang+1];
for (size_t ik=0; ik<idl1; ++ik)
{
C2(ik,l ) += war*CH2(ik,j );
C2(ik,lc) += wai*CH2(ik,jc);
}
}
}
for (size_t j=1; j<ipph; ++j)
for (size_t ik=0; ik<idl1; ++ik)
CH2(ik,0) += CH2(ik,j);
for (size_t j=1, jc=ip-1; j<ipph; ++j,--jc) // 124
for (size_t k=0; k<l1; ++k)
{
CH(0,k,j ) = C1(0,k,j)-C1(0,k,jc);
CH(0,k,jc) = C1(0,k,j)+C1(0,k,jc);
}
if (ido==1) return;
for (size_t j=1, jc=ip-1; j<ipph; ++j, --jc) // 127
for (size_t k=0; k<l1; ++k)
for (size_t i=1; i<=ido-2; i+=2)
{
CH(i ,k,j ) = C1(i ,k,j)-C1(i+1,k,jc);
CH(i ,k,jc) = C1(i ,k,j)+C1(i+1,k,jc);
CH(i+1,k,j ) = C1(i+1,k,j)+C1(i ,k,jc);
CH(i+1,k,jc) = C1(i+1,k,j)-C1(i ,k,jc);
}
// All in CH
for (size_t j=1; j<ip; ++j)
{
size_t is = (j-1)*(ido-1);
for (size_t k=0; k<l1; ++k)
{
size_t idij = is;
for (size_t i=1; i<=ido-2; i+=2)
{
T t1=CH(i,k,j), t2=CH(i+1,k,j);
CH(i ,k,j) = wa[idij]*t1-wa[idij+1]*t2;
CH(i+1,k,j) = wa[idij]*t2+wa[idij+1]*t1;
idij+=2;
}
}
}
}
template<typename T> void copy_and_norm(T *c, T *p1, size_t n, T0 fct)
{
if (p1!=c)
{
if (fct!=1.)
for (size_t i=0; i<n; ++i)
c[i] = fct*p1[i];
else
memcpy (c,p1,n*sizeof(T));
}
else
if (fct!=1.)
for (size_t i=0; i<n; ++i)
c[i] *= fct;
}
public:
template<typename T> void forward(T c[], T0 fct)
{
if (length==1) { c[0]*=fct; return; }
size_t n=length;
size_t l1=n, nf=fact.size();
arr<T> ch(n);
T *p1=c, *p2=ch.data();
for(size_t k1=0; k1<nf;++k1)
{
size_t k=nf-k1-1;
size_t ip=fact[k].fct;
size_t ido=n / l1;
l1 /= ip;
if(ip==4)
radf4(ido, l1, p1, p2, fact[k].tw);
else if(ip==2)
radf2(ido, l1, p1, p2, fact[k].tw);
else if(ip==3)
radf3(ido, l1, p1, p2, fact[k].tw);
else if(ip==5)
radf5(ido, l1, p1, p2, fact[k].tw);
else
{ radfg(ido, ip, l1, p1, p2, fact[k].tw, fact[k].tws); swap (p1,p2); }
swap (p1,p2);
}
copy_and_norm(c,p1,n,fct);
}
template<typename T> void backward(T c[], T0 fct)
{
if (length==1) { c[0]*=fct; return; }
size_t n=length;
size_t l1=1, nf=fact.size();
arr<T> ch(n);
T *p1=c, *p2=ch.data();
for(size_t k=0; k<nf; k++)
{
size_t ip = fact[k].fct,
ido= n/(ip*l1);
if(ip==4)
radb4(ido, l1, p1, p2, fact[k].tw);
else if(ip==2)
radb2(ido, l1, p1, p2, fact[k].tw);
else if(ip==3)
radb3(ido, l1, p1, p2, fact[k].tw);
else if(ip==5)
radb5(ido, l1, p1, p2, fact[k].tw);
else
radbg(ido, ip, l1, p1, p2, fact[k].tw, fact[k].tws);
swap (p1,p2);
l1*=ip;
}
copy_and_norm(c,p1,n,fct);
}
private:
void factorize()
{
size_t len=length;
while ((len%4)==0)
{ add_factor(4); len>>=2; }
if ((len%2)==0)
{
len>>=1;
// factor 2 should be at the front of the factor list
add_factor(2);
swap(fact[0].fct, fact.back().fct);
}
for (size_t divisor=3; divisor*divisor<=len; divisor+=2)
while ((len%divisor)==0)
{
add_factor(divisor);
len/=divisor;
}
if (len>1) add_factor(len);
}
size_t twsize() const
{
size_t twsz=0, l1=1;
for (size_t k=0; k<fact.size(); ++k)
{
size_t ip=fact[k].fct, ido=length/(l1*ip);
twsz+=(ip-1)*(ido-1);
if (ip>5) twsz+=2*ip;
l1*=ip;
}
return twsz;
}
void comp_twiddle()
{
sincos_2pibyn<T0> twid(length, true);
size_t l1=1;
T0 *ptr=mem.data();
for (size_t k=0; k<fact.size(); ++k)
{
size_t ip=fact[k].fct, ido=length/(l1*ip);
if (k<fact.size()-1) // last factor doesn't need twiddles
{
fact[k].tw=ptr; ptr+=(ip-1)*(ido-1);
for (size_t j=1; j<ip; ++j)
for (size_t i=1; i<=(ido-1)/2; ++i)
{
fact[k].tw[(j-1)*(ido-1)+2*i-2] = twid[2*j*l1*i];
fact[k].tw[(j-1)*(ido-1)+2*i-1] = twid[2*j*l1*i+1];
}
}
if (ip>5) // special factors required by *g functions
{
fact[k].tws=ptr; ptr+=2*ip;
fact[k].tws[0] = 1.;
fact[k].tws[1] = 0.;
for (size_t i=2, ic=2*ip-2; i<=ic; i+=2, ic-=2)
{
fact[k].tws[i ] = twid[i*(length/ip)];
fact[k].tws[i+1] = twid[i*(length/ip)+1];
fact[k].tws[ic] = twid[i*(length/ip)];
fact[k].tws[ic+1] = -twid[i*(length/ip)+1];
}
}
l1*=ip;
}
}
public:
POCKETFFT_NOINLINE rfftp(size_t length_)
: length(length_)
{
if (length==0) throw runtime_error("zero-length FFT requested");
if (length==1) return;
factorize();
mem.resize(twsize());
comp_twiddle();
}
};
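// A minimal usage sketch for rfftp (hypothetical; the public entry points
// further down are the intended interface). forward() produces the FFTPACK
// "halfcomplex" layout that general_r2c/general_c2r unpack below:
//   [r0, Re(c1), Im(c1), Re(c2), Im(c2), ..., Re(cN/2) if N is even]
//
//   rfftp<double> plan(8);
//   vector<double> buf(8, 1.);           // constant input
//   plan.forward(buf.data(), 1.);        // buf[0]=8, all other entries 0
//   plan.backward(buf.data(), 1./8);     // back to the constant input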
//
// complex Bluestein transforms
//
template<typename T0> class fftblue
{
private:
size_t n, n2;
cfftp<T0> plan;
arr<cmplx<T0>> mem;
cmplx<T0> *bk, *bkf;
template<bool fwd, typename T> void fft(cmplx<T> c[], T0 fct)
{
arr<cmplx<T>> akf(n2);
/* initialize a_k and FFT it */
for (size_t m=0; m<n; ++m)
akf[m] = c[m].template special_mul<fwd>(bk[m]);
auto zero = akf[0]*T0(0);
for (size_t m=n; m<n2; ++m)
akf[m]=zero;
plan.forward (akf.data(),1.);
/* do the convolution */
for (size_t m=0; m<n2; ++m)
akf[m] = akf[m].template special_mul<!fwd>(bkf[m]);
/* inverse FFT */
plan.backward (akf.data(),1.);
/* multiply by b_k */
for (size_t m=0; m<n; ++m)
c[m] = akf[m].template special_mul<fwd>(bk[m])*fct;
}
public:
POCKETFFT_NOINLINE fftblue(size_t length)
: n(length), n2(util::good_size(n*2-1)), plan(n2), mem(n+n2),
bk(mem.data()), bkf(mem.data()+n)
{
/* initialize b_k */
sincos_2pibyn<T0> tmp_(2*n, false);
auto tmp = tmp_.cdata();
bk[0].Set(1, 0);
size_t coeff=0;
for (size_t m=1; m<n; ++m)
{
coeff+=2*m-1;
if (coeff>=2*n) coeff-=2*n;
bk[m] = tmp[coeff];
}
/* initialize the zero-padded, Fourier transformed b_k. Add normalisation. */
T0 xn2 = T0(1)/T0(n2);
bkf[0] = bk[0]*xn2;
for (size_t m=1; m<n; ++m)
bkf[m] = bkf[n2-m] = bk[m]*xn2;
for (size_t m=n;m<=(n2-n);++m)
bkf[m].Set(0.,0.);
plan.forward(bkf,1.);
}
template<typename T> void backward(cmplx<T> c[], T0 fct)
{ fft<false>(c,fct); }
template<typename T> void forward(cmplx<T> c[], T0 fct)
{ fft<true>(c,fct); }
template<typename T> void backward_r(T c[], T0 fct)
{
arr<cmplx<T>> tmp(n);
tmp[0].Set(c[0],c[0]*0);
memcpy (reinterpret_cast<void *>(tmp.data()+1),
reinterpret_cast<void *>(c+1), (n-1)*sizeof(T));
if ((n&1)==0) tmp[n/2].i=T0(0)*c[0];
for (size_t m=1; 2*m<n; ++m)
tmp[n-m].Set(tmp[m].r, -tmp[m].i);
fft<false>(tmp.data(),fct);
for (size_t m=0; m<n; ++m)
c[m] = tmp[m].r;
}
template<typename T> void forward_r(T c[], T0 fct)
{
arr<cmplx<T>> tmp(n);
auto zero = T0(0)*c[0];
for (size_t m=0; m<n; ++m)
tmp[m].Set(c[m], zero);
fft<true>(tmp.data(),fct);
c[0] = tmp[0].r;
memcpy (c+1, tmp.data()+1, (n-1)*sizeof(T));
}
};
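// Bluestein/chirp-z recap for the class above: bk[m] = exp(i*pi*m^2/n) (up
// to sign convention) is built by accumulating odd numbers
// (m^2 = 1+3+...+(2m-1)) modulo 2n as an index into a 2n-point sin/cos
// table, and the length-n transform is re-expressed as a circular
// convolution of length n2 = good_size(2n-1), which the embedded cfftp plan
// of composite length evaluates. This keeps arbitrary (notably prime)
// lengths at O(n log n).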
//
// flexible (FFTPACK/Bluestein) complex 1D transform
//
template<typename T0> class pocketfft_c
{
private:
unique_ptr<cfftp<T0>> packplan;
unique_ptr<fftblue<T0>> blueplan;
size_t len;
public:
POCKETFFT_NOINLINE pocketfft_c(size_t length)
: len(length)
{
if (length==0) throw runtime_error("zero-length FFT requested");
size_t tmp = (length<50) ? 0 : util::largest_prime_factor(length);
if (tmp*tmp <= length)
{
packplan=unique_ptr<cfftp<T0>>(new cfftp<T0>(length));
return;
}
double comp1 = util::cost_guess(length);
double comp2 = 2*util::cost_guess(util::good_size(2*length-1));
comp2*=1.5; /* fudge factor that appears to give good overall performance */
if (comp2<comp1) // use Bluestein
blueplan=unique_ptr<fftblue<T0>>(new fftblue<T0>(length));
else
packplan=unique_ptr<cfftp<T0>>(new cfftp<T0>(length));
}
template<typename T> POCKETFFT_NOINLINE void backward(cmplx<T> c[], T0 fct) const
{ packplan ? packplan->backward(c,fct) : blueplan->backward(c,fct); }
template<typename T> POCKETFFT_NOINLINE void forward(cmplx<T> c[], T0 fct) const
{ packplan ? packplan->forward(c,fct) : blueplan->forward(c,fct); }
size_t length() const { return len; }
};
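// Hypothetical usage sketch: pocketfft_c keeps FFTPACK-style passes whenever
// the largest prime factor is modest (tmp*tmp <= length) and otherwise picks
// whichever of FFTPACK's generic pass and Bluestein the cost heuristic
// prefers, so a large prime length typically ends up on blueplan:
//
//   pocketfft_c<double> plan(997);       // 997 is prime
//   vector<cmplx<double>> buf(997);
//   plan.forward(buf.data(), 1.);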
//
// flexible (FFTPACK/Bluestein) real-valued 1D transform
//
template<typename T0> class pocketfft_r
{
private:
unique_ptr<rfftp<T0>> packplan;
unique_ptr<fftblue<T0>> blueplan;
size_t len;
public:
POCKETFFT_NOINLINE pocketfft_r(size_t length)
: len(length)
{
if (length==0) throw runtime_error("zero-length FFT requested");
size_t tmp = (length<50) ? 0 : util::largest_prime_factor(length);
if (tmp*tmp <= length)
{
packplan=unique_ptr<rfftp<T0>>(new rfftp<T0>(length));
return;
}
double comp1 = 0.5*util::cost_guess(length);
double comp2 = 2*util::cost_guess(util::good_size(2*length-1));
comp2*=1.5; /* fudge factor that appears to give good overall performance */
if (comp2<comp1) // use Bluestein
blueplan=unique_ptr<fftblue<T0>>(new fftblue<T0>(length));
else
packplan=unique_ptr<rfftp<T0>>(new rfftp<T0>(length));
}
template<typename T> POCKETFFT_NOINLINE void backward(T c[], T0 fct) const
{
packplan ? packplan->backward(c,fct)
: blueplan->backward_r(c,fct);
}
template<typename T> POCKETFFT_NOINLINE void forward(T c[], T0 fct) const
{
packplan ? packplan->forward(c,fct)
: blueplan->forward_r(c,fct);
}
size_t length() const { return len; }
};
//
// sine/cosine transforms
//
template<typename T0> class T_dct1
{
private:
pocketfft_r<T0> fftplan;
public:
POCKETFFT_NOINLINE T_dct1(size_t length)
: fftplan(2*(length-1)) {}
template<typename T> POCKETFFT_NOINLINE void exec(T c[], T0 fct, bool ortho) const
{
constexpr T0 sqrt2=T0(1.414213562373095048801688724209698L);
size_t N=fftplan.length(), n=N/2+1;
if (ortho)
{ c[0]*=sqrt2; c[n-1]*=sqrt2; }
arr<T> tmp(N);
tmp[0] = c[0];
for (size_t i=1; i<n; ++i)
tmp[i] = tmp[N-i] = c[i];
fftplan.forward(tmp.data(), fct);
c[0] = tmp[0];
for (size_t i=1; i<n; ++i)
c[i] = tmp[2*i-1];
if (ortho)
{ c[0]/=sqrt2; c[n-1]/=sqrt2; }
}
size_t length() const { return fftplan.length()/2+1; }
};
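// T_dct1 computes a DCT-I of length N via a real FFT of the even-symmetric
// extension of length 2*(N-1) built in tmp[]; the cosine coefficients are
// then the odd-position entries (tmp[2*i-1]) of the halfcomplex spectrum,
// and the sqrt2 scaling of the two endpoints yields the orthonormal variant.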
template<typename T0> class T_dct2
{
private:
pocketfft_r<T0> fftplan;
vector<T0> twiddle;
public:
POCKETFFT_NOINLINE T_dct2(size_t length)
: fftplan(length), twiddle(length)
{
constexpr T0 pi = T0(3.141592653589793238462643383279502884197L);
for (size_t i=0; i<length; ++i)
twiddle[i] = T0(cos(0.5*pi*T0(i+1)/T0(length)));
}
template<typename T> POCKETFFT_NOINLINE void exec(T c[], T0 fct, bool ortho) const
{
constexpr T0 sqrt2=T0(1.414213562373095048801688724209698L);
size_t N=length();
if (N==1)
c[0]*=2*fct;
else if (N==2)
{
T x1 = 2*fct*(c[0]+c[1]);
c[1] = sqrt2*fct*(c[0]-c[1]);
c[0] = x1;
}
else
{
size_t NS2 = (N+1)/2;
for (size_t i=2; i<N; i+=2)
{
T xim1 = T0(0.5)*(c[i-1]+c[i]);
c[i] = T0(0.5)*(c[i]-c[i-1]);
c[i-1] = xim1;
}
fftplan.backward(c, fct);
for (size_t k=1, kc=N-1; k<NS2; ++k, --kc)
{
T tmp = twiddle[k-1]*c[kc]+twiddle[kc-1]*c[k];
c[kc] = twiddle[k-1]*c[k]-twiddle[kc-1]*c[kc];
c[k] = tmp;
}
if ((N&1)==0)
c[NS2] = twiddle[NS2-1]*(c[NS2]+c[NS2]);
for (size_t k=1, kc=N-1; k<NS2; ++k, --kc)
{
T tmp = c[k]+c[kc];
c[kc] = c[k]-c[kc];
c[k] = tmp;
}
c[0] *= 2;
}
if (ortho) c[0]/=sqrt2;
}
size_t length() const { return fftplan.length(); }
};
template<typename T0> class T_dct3
{
private:
pocketfft_r<T0> fftplan;
vector<T0> twiddle;
public:
POCKETFFT_NOINLINE T_dct3(size_t length)
: fftplan(length), twiddle(length)
{
constexpr T0 pi = T0(3.141592653589793238462643383279502884197L);
for (size_t i=0; i<length; ++i)
twiddle[i] = T0(cos(0.5*pi*T0(i+1)/T0(length)));
}
template<typename T> POCKETFFT_NOINLINE void exec(T c[], T0 fct, bool ortho) const
{
constexpr T0 sqrt2=T0(1.414213562373095048801688724209698L);
size_t N=length();
if (ortho) c[0]*=sqrt2;
if (N==1)
c[0]*=fct;
else if (N==2)
{
T TSQX = sqrt2*c[1];
c[1] = fct*(c[0]-TSQX);
c[0] = fct*(c[0]+TSQX);
}
else
{
size_t NS2 = (N+1)/2;
for (size_t k=1, kc=N-1; k<NS2; ++k, --kc)
{
T tmp = c[k]-c[kc];
c[k] = c[k]+c[kc];
c[kc] = tmp;
}
if ((N&1)==0)
c[NS2] = c[NS2]+c[NS2];
for (size_t k=1, kc=N-1; k<NS2; ++k, --kc)
{
T tmp = twiddle[k-1]*c[k]-twiddle[kc-1]*c[kc];
c[k] = twiddle[k-1]*c[kc]+twiddle[kc-1]*c[k];
c[kc] = tmp;
}
if ((N&1)==0)
c[NS2] = twiddle[NS2-1]*c[NS2];
fftplan.forward(c, fct);
for (size_t i=2; i<N; i+=2)
{
T xim1 = c[i-1]-c[i];
c[i] += c[i-1];
c[i-1] = xim1;
}
}
}
size_t length() const { return fftplan.length(); }
};
template<typename T0> class T_dct4
{
// even length algorithm from
// https://www.appletonaudio.com/blog/2013/derivation-of-fast-dct-4-algorithm-based-on-dft/
private:
size_t N;
unique_ptr<pocketfft_c<T0>> fft;
unique_ptr<pocketfft_r<T0>> rfft;
arr<cmplx<T0>> C2;
public:
POCKETFFT_NOINLINE T_dct4(size_t length)
: N(length),
fft((N&1) ? nullptr : new pocketfft_c<T0>(N/2)),
rfft((N&1)? new pocketfft_r<T0>(N) : nullptr),
C2((N&1) ? 0 : N/2)
{
constexpr T0 pi = T0(3.141592653589793238462643383279502884197L);
if ((N&1)==0)
for (size_t i=0; i<N/2; ++i)
{
T0 ang = -pi/T0(N)*(T0(i)+T0(0.125));
C2[i].Set(cos(ang), sin(ang));
}
}
template<typename T> POCKETFFT_NOINLINE void exec(T c[], T0 fct, bool /*ortho*/) const
{
constexpr T0 sqrt2=T0(1.414213562373095048801688724209698L);
if (N&1)
{
// The following code is derived from the FFTW3 function apply_re11()
// and is released under the 3-clause BSD license with friendly
// permission of Matteo Frigo.
auto SGN_SET = [](T x, size_t i) {return (i%2) ? -x : x;};
arr<T> y(N);
size_t n2 = N/2;
size_t i;
{
size_t m;
for (i=0, m=n2; m<N; ++i, m+=4)
y[i] = c[m];
for (; m<2*N; ++i, m+=4)
y[i] = -c[2*N-m-1];
for (; m<3*N; ++i, m+=4)
y[i] = -c[m-2*N];
for (; m<4*N; ++i, m+=4)
y[i] = c[4*N-m-1];
m -= 4*N;
for (; i<N; ++i, m+=4)
y[i] = c[m];
}
rfft->forward(y.data(), fct);
for (i=0; i+i+1<n2; ++i)
{
size_t k = i+i+1;
T c1=y[2*k-1], s1=y[2*k], c2=y[2*k+1], s2=y[2*k+2];
c[i] = sqrt2 * (SGN_SET(c1, (i+1)/2) + SGN_SET(s1, i/2));
c[N-(i+1)] = sqrt2 * (SGN_SET(c1, (N-i)/2) - SGN_SET(s1, (N-(i+1))/2));
c[n2-(i+1)] = sqrt2 * (SGN_SET(c2, (n2-i)/2) - SGN_SET(s2, (n2-(i+1))/2));
c[n2+(i+1)] = sqrt2 * (SGN_SET(c2, (n2+i+2)/2) + SGN_SET(s2, (n2+(i+1))/2));
}
if (i+i+1 == n2)
{
T cx=y[2*n2-1], sx=y[2*n2];
c[i] = sqrt2 * (SGN_SET(cx, (i+1)/2) + SGN_SET(sx, i/2));
c[N-(i+1)] = sqrt2 * (SGN_SET(cx, (i+2)/2) + SGN_SET(sx, (i+1)/2));
}
c[n2] = sqrt2 * SGN_SET(y[0], (n2+1)/2);
// FFTW-derived code ends here
}
else
{
arr<cmplx<T>> y(N/2);
for(size_t i=0; i<N/2; ++i)
{
y[i].Set(c[2*i],c[N-1-2*i]);
y[i] *= C2[i];
}
fft->forward(y.data(), fct);
for(size_t i=0; i<N/2; ++i)
y[i] *= C2[i];
for(size_t i=0; i<N/2; ++i)
{
c[2*i] = 2*y[i].r;
c[2*i+1] = -2*y[N/2-1-i].i;
}
}
}
size_t length() const { return N; }
};
template<typename T0> class T_dst1
{
private:
pocketfft_r<T0> fftplan;
public:
POCKETFFT_NOINLINE T_dst1(size_t length)
: fftplan(2*(length+1)) {}
template<typename T> POCKETFFT_NOINLINE void exec(T c[], T0 fct, bool /*ortho*/) const
{
size_t N=fftplan.length(), n=N/2-1;
arr<T> tmp(N);
tmp[0] = tmp[n+1] = c[0]*0;
for (size_t i=0; i<n; ++i)
{
tmp[i+1] = c[i];
tmp[N-1-i] = -c[i];
}
fftplan.forward(tmp.data(), fct);
for (size_t i=0; i<n; ++i)
c[i] = -tmp[2*i+2];
}
size_t length() const { return fftplan.length()/2-1; }
};
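// T_dst1 mirrors T_dct1 above with odd symmetry: the input is embedded
// antisymmetrically into a length 2*(N+1) buffer with zeros at the two
// symmetry points, and the sine coefficients are read (negated) from the
// even positions of the halfcomplex spectrum.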
template<typename T0> class T_dst2
{
private:
T_dct2<T0> dct;
public:
POCKETFFT_NOINLINE T_dst2(size_t length)
: dct(length) {}
template<typename T> POCKETFFT_NOINLINE void exec(T c[], T0 fct, bool ortho) const
{
constexpr T0 sqrt2=T0(1.414213562373095048801688724209698L);
size_t N=length();
if (N==1)
c[0]*=2*fct;
else
{
for (size_t k=1; k<N; k+=2)
c[k] = -c[k];
dct.exec(c, fct, false);
for (size_t k=0, kc=N-1; k<kc; ++k, --kc)
swap(c[k], c[kc]);
}
if (ortho) c[0]/=sqrt2;
}
size_t length() const { return dct.length(); }
};
template<typename T0> class T_dst3
{
private:
T_dct3<T0> dct;
public:
POCKETFFT_NOINLINE T_dst3(size_t length)
: dct(length) {}
template<typename T> POCKETFFT_NOINLINE void exec(T c[], T0 fct, bool ortho)
{
constexpr T0 sqrt2=T0(1.414213562373095048801688724209698L);
size_t N=length();
if (ortho) c[0]*=sqrt2;
if (N==1)
c[0]*=fct;
else
{
size_t NS2 = N/2;
for (size_t k=0, kc=N-1; k<NS2; ++k, --kc)
swap(c[k], c[kc]);
dct.exec(c, fct, false);
for (size_t k=1; k<N; k+=2)
c[k] = -c[k];
}
}
size_t length() const { return dct.length(); }
};
template<typename T0> class T_dst4
{
private:
T_dct4<T0> dct;
public:
POCKETFFT_NOINLINE T_dst4(size_t length)
: dct(length) {}
template<typename T> POCKETFFT_NOINLINE void exec(T c[], T0 fct, bool /*ortho*/)
{
size_t N=length();
//if (N==1) { c[0]*=fct; return; }
size_t NS2 = N/2;
for (size_t k=0, kc=N-1; k<NS2; ++k, --kc)
swap(c[k], c[kc]);
dct.exec(c, fct, false);
for (size_t k=1; k<N; k+=2)
c[k] = -c[k];
}
size_t length() const { return dct.length(); }
};
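// T_dst2/3/4 all lean on the standard DST/DCT correspondence: a DST equals
// the matching DCT combined with a reversal and an alternating sign flip
// (sign flip on the input and reversal of the output for DST-II; reversal
// of the input and sign flip of the output for DST-III and DST-IV), which
// is exactly what the three thin wrappers above implement.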
//
// multi-D infrastructure
//
template<typename T> shared_ptr<T> get_plan(size_t length)
{
#if POCKETFFT_CACHE_SIZE==0
return make_shared<T>(length);
#else
constexpr size_t nmax=POCKETFFT_CACHE_SIZE;
static array<shared_ptr<T>, nmax> cache;
static array<size_t, nmax> last_access{{0}};
static size_t access_counter = 0;
static mutex mut;
auto find_in_cache = [&]() -> shared_ptr<T>
{
for (size_t i=0; i<nmax; ++i)
if (cache[i] && (cache[i]->length()==length))
{
// no need to update if this is already the most recent entry
if (last_access[i]!=access_counter)
{
last_access[i] = ++access_counter;
// Guard against overflow
if (access_counter == 0)
last_access.fill(0);
}
return cache[i];
}
return nullptr;
};
{
lock_guard<mutex> lock(mut);
auto p = find_in_cache();
if (p) return p;
}
auto plan = make_shared<T>(length);
{
lock_guard<mutex> lock(mut);
auto p = find_in_cache();
if (p) return p;
size_t lru = 0;
for (size_t i=1; i<nmax; ++i)
if (last_access[i] < last_access[lru])
lru = i;
cache[lru] = plan;
last_access[lru] = ++access_counter;
}
return plan;
#endif
}
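// The cache above is a small LRU keyed by transform length with a
// double-checked lookup: plan construction (potentially expensive) runs
// outside the mutex, and the cache is re-queried before insertion so that
// two threads racing on the same length converge on one shared plan. The
// access_counter overflow check resets all timestamps instead of letting
// the LRU ordering wrap around.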
class arr_info
{
protected:
shape_t shp;
stride_t str;
public:
arr_info(const shape_t &shape_, const stride_t &stride_)
: shp(shape_), str(stride_) {}
size_t ndim() const { return shp.size(); }
size_t size() const { return util::prod(shp); }
const shape_t &shape() const { return shp; }
size_t shape(size_t i) const { return shp[i]; }
const stride_t &stride() const { return str; }
const ptrdiff_t &stride(size_t i) const { return str[i]; }
};
template<typename T> class cndarr: public arr_info
{
protected:
const char *d;
public:
cndarr(const void *data_, const shape_t &shape_, const stride_t &stride_)
: arr_info(shape_, stride_),
d(reinterpret_cast<const char *>(data_)) {}
const T &operator[](ptrdiff_t ofs) const
{ return *reinterpret_cast<const T *>(d+ofs); }
};
template<typename T> class ndarr: public cndarr<T>
{
public:
ndarr(void *data_, const shape_t &shape_, const stride_t &stride_)
: cndarr<T>::cndarr(const_cast<const void *>(data_), shape_, stride_)
{}
T &operator[](ptrdiff_t ofs)
{ return *reinterpret_cast<T *>(const_cast<char *>(cndarr<T>::d+ofs)); }
};
template<size_t N> class multi_iter
{
private:
shape_t pos;
const arr_info &iarr, &oarr;
ptrdiff_t p_ii, p_i[N], str_i, p_oi, p_o[N], str_o;
size_t idim, rem;
void advance_i()
{
for (int i_=int(pos.size())-1; i_>=0; --i_)
{
auto i = size_t(i_);
if (i==idim) continue;
p_ii += iarr.stride(i);
p_oi += oarr.stride(i);
if (++pos[i] < iarr.shape(i))
return;
pos[i] = 0;
p_ii -= ptrdiff_t(iarr.shape(i))*iarr.stride(i);
p_oi -= ptrdiff_t(oarr.shape(i))*oarr.stride(i);
}
}
public:
multi_iter(const arr_info &iarr_, const arr_info &oarr_, size_t idim_)
: pos(iarr_.ndim(), 0), iarr(iarr_), oarr(oarr_), p_ii(0),
str_i(iarr.stride(idim_)), p_oi(0), str_o(oarr.stride(idim_)),
idim(idim_), rem(iarr.size()/iarr.shape(idim))
{
auto nshares = util::nthreads();
if (nshares==1) return;
if (nshares==0) throw runtime_error("can't run with zero threads");
auto myshare = util::thread_num();
if (myshare>=nshares) throw runtime_error("impossible share requested");
size_t nbase = rem/nshares;
size_t additional = rem%nshares;
size_t lo = myshare*nbase + ((myshare<additional) ? myshare : additional);
size_t hi = lo+nbase+(myshare<additional);
size_t todo = hi-lo;
size_t chunk = rem;
for (size_t i=0; i<pos.size(); ++i)
{
if (i==idim) continue;
chunk /= iarr.shape(i);
size_t n_advance = lo/chunk;
pos[i] += n_advance;
p_ii += ptrdiff_t(n_advance)*iarr.stride(i);
p_oi += ptrdiff_t(n_advance)*oarr.stride(i);
lo -= n_advance*chunk;
}
rem = todo;
}
void advance(size_t n)
{
if (rem<n) throw runtime_error("underrun");
for (size_t i=0; i<n; ++i)
{
p_i[i] = p_ii;
p_o[i] = p_oi;
advance_i();
}
rem -= n;
}
ptrdiff_t iofs(size_t i) const { return p_i[0] + ptrdiff_t(i)*str_i; }
ptrdiff_t iofs(size_t j, size_t i) const { return p_i[j] + ptrdiff_t(i)*str_i; }
ptrdiff_t oofs(size_t i) const { return p_o[0] + ptrdiff_t(i)*str_o; }
ptrdiff_t oofs(size_t j, size_t i) const { return p_o[j] + ptrdiff_t(i)*str_o; }
size_t length_in() const { return iarr.shape(idim); }
size_t length_out() const { return oarr.shape(idim); }
ptrdiff_t stride_in() const { return str_i; }
ptrdiff_t stride_out() const { return str_o; }
size_t remaining() const { return rem; }
};
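// multi_iter enumerates the 1-D "lines" along axis idim of an input/output
// array pair. Its constructor statically partitions those lines across the
// cooperating threads (util::thread_num() out of util::nthreads()) and
// fast-forwards pos/p_ii/p_oi to this thread's first line; advance(n) then
// latches n consecutive line base offsets so the vectorized code paths
// below can transform VLEN lines per batch via iofs(j,i)/oofs(j,i).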
class simple_iter
{
private:
shape_t pos;
const arr_info &arr;
ptrdiff_t p;
size_t rem;
public:
simple_iter(const arr_info &arr_)
: pos(arr_.ndim(), 0), arr(arr_), p(0), rem(arr_.size()) {}
void advance()
{
--rem;
for (int i_=int(pos.size())-1; i_>=0; --i_)
{
auto i = size_t(i_);
p += arr.stride(i);
if (++pos[i] < arr.shape(i))
return;
pos[i] = 0;
p -= ptrdiff_t(arr.shape(i))*arr.stride(i);
}
}
ptrdiff_t ofs() const { return p; }
size_t remaining() const { return rem; }
};
class rev_iter
{
private:
shape_t pos;
const arr_info &arr;
vector<char> rev_axis;
vector<char> rev_jump;
size_t last_axis, last_size;
shape_t shp;
ptrdiff_t p, rp;
size_t rem;
public:
rev_iter(const arr_info &arr_, const shape_t &axes)
: pos(arr_.ndim(), 0), arr(arr_), rev_axis(arr_.ndim(), 0),
rev_jump(arr_.ndim(), 1), p(0), rp(0)
{
for (auto ax: axes)
rev_axis[ax]=1;
last_axis = axes.back();
last_size = arr.shape(last_axis)/2 + 1;
shp = arr.shape();
shp[last_axis] = last_size;
rem=1;
for (auto i: shp)
rem *= i;
}
void advance()
{
--rem;
for (int i_=int(pos.size())-1; i_>=0; --i_)
{
auto i = size_t(i_);
p += arr.stride(i);
if (!rev_axis[i])
rp += arr.stride(i);
else
{
rp -= arr.stride(i);
if (rev_jump[i])
{
rp += ptrdiff_t(arr.shape(i))*arr.stride(i);
rev_jump[i] = 0;
}
}
if (++pos[i] < shp[i])
return;
pos[i] = 0;
p -= ptrdiff_t(shp[i])*arr.stride(i);
if (rev_axis[i])
{
rp -= ptrdiff_t(arr.shape(i)-shp[i])*arr.stride(i);
rev_jump[i] = 1;
}
else
rp -= ptrdiff_t(shp[i])*arr.stride(i);
}
}
ptrdiff_t ofs() const { return p; }
ptrdiff_t rev_ofs() const { return rp; }
size_t remaining() const { return rem; }
};
#ifndef POCKETFFT_NO_VECTORS
template<typename T> struct VTYPE {};
template<> struct VTYPE<float>
{
using type = float __attribute__ ((vector_size (VLEN<float>::val*sizeof(float))));
};
template<> struct VTYPE<double>
{
using type = double __attribute__ ((vector_size (VLEN<double>::val*sizeof(double))));
};
template<> struct VTYPE<long double>
{
using type = long double __attribute__ ((vector_size (VLEN<long double>::val*sizeof(long double))));
};
#endif
template<typename T> arr<char> alloc_tmp(const shape_t &shape,
size_t axsize, size_t elemsize)
{
auto othersize = util::prod(shape)/axsize;
auto tmpsize = axsize*((othersize>=VLEN<T>::val) ? VLEN<T>::val : 1);
return arr<char>(tmpsize*elemsize);
}
template<typename T> arr<char> alloc_tmp(const shape_t &shape,
const shape_t &axes, size_t elemsize)
{
size_t fullsize=util::prod(shape);
size_t tmpsize=0;
for (size_t i=0; i<axes.size(); ++i)
{
auto axsize = shape[axes[i]];
auto othersize = fullsize/axsize;
auto sz = axsize*((othersize>=VLEN<T>::val) ? VLEN<T>::val : 1);
if (sz>tmpsize) tmpsize=sz;
}
return arr<char>(tmpsize*elemsize);
}
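// Both alloc_tmp overloads size the scratch buffer for the worst case the
// axis loop can encounter: axsize elements per line, times VLEN<T>::val
// whenever enough independent lines exist to fill a SIMD batch, times the
// caller-supplied element size in bytes.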
#ifdef POCKETFFT_OPENMP
#define POCKETFFT_NTHREADS nthreads
#else
#define POCKETFFT_NTHREADS
#endif
template<typename T> POCKETFFT_NOINLINE void general_c(
const cndarr<cmplx<T>> &in, ndarr<cmplx<T>> &out,
const shape_t &axes, bool forward, T fct, size_t POCKETFFT_NTHREADS)
{
shared_ptr<pocketfft_c<T>> plan;
for (size_t iax=0; iax<axes.size(); ++iax)
{
constexpr auto vlen = VLEN<T>::val;
size_t len=in.shape(axes[iax]);
if ((!plan) || (len!=plan->length()))
plan = get_plan<pocketfft_c<T>>(len);
#ifdef POCKETFFT_OPENMP
#pragma omp parallel num_threads(util::thread_count(nthreads, in.shape(), axes[iax]))
#endif
{
auto storage = alloc_tmp<T>(in.shape(), len, sizeof(cmplx<T>));
const auto &tin(iax==0? in : out);
multi_iter<vlen> it(tin, out, axes[iax]);
#ifndef POCKETFFT_NO_VECTORS
if (vlen>1)
while (it.remaining()>=vlen)
{
using vtype = typename VTYPE<T>::type;
it.advance(vlen);
auto tdatav = reinterpret_cast<cmplx<vtype> *>(storage.data());
for (size_t i=0; i<len; ++i)
for (size_t j=0; j<vlen; ++j)
{
tdatav[i].r[j] = tin[it.iofs(j,i)].r;
tdatav[i].i[j] = tin[it.iofs(j,i)].i;
}
forward ? plan->forward (tdatav, fct) : plan->backward(tdatav, fct);
for (size_t i=0; i<len; ++i)
for (size_t j=0; j<vlen; ++j)
out[it.oofs(j,i)].Set(tdatav[i].r[j],tdatav[i].i[j]);
}
#endif
while (it.remaining()>0)
{
it.advance(1);
auto tdata = reinterpret_cast<cmplx<T> *>(storage.data());
if ((&tin[0]==&out[0]) && (it.stride_out()==sizeof(cmplx<T>))) // fully in-place
forward ? plan->forward (&out[it.oofs(0)], fct)
: plan->backward(&out[it.oofs(0)], fct);
else if (it.stride_out()==sizeof(cmplx<T>)) // compute FFT in output location
{
for (size_t i=0; i<len; ++i)
out[it.oofs(i)] = tin[it.iofs(i)];
forward ? plan->forward (&out[it.oofs(0)], fct)
: plan->backward(&out[it.oofs(0)], fct);
}
else
{
for (size_t i=0; i<len; ++i)
tdata[i] = tin[it.iofs(i)];
forward ? plan->forward (tdata, fct) : plan->backward(tdata, fct);
for (size_t i=0; i<len; ++i)
out[it.oofs(i)] = tdata[i];
}
}
} // end of parallel region
fct = T(1); // factor has been applied, use 1 for remaining axes
}
}
template<typename T> POCKETFFT_NOINLINE void general_hartley(
const cndarr<T> &in, ndarr<T> &out, const shape_t &axes, T fct,
size_t POCKETFFT_NTHREADS)
{
shared_ptr<pocketfft_r<T>> plan;
for (size_t iax=0; iax<axes.size(); ++iax)
{
constexpr auto vlen = VLEN<T>::val;
size_t len=in.shape(axes[iax]);
if ((!plan) || (len!=plan->length()))
plan = get_plan<pocketfft_r<T>>(len);
#ifdef POCKETFFT_OPENMP
#pragma omp parallel num_threads(util::thread_count(nthreads, in.shape(), axes[iax]))
#endif
{
auto storage = alloc_tmp<T>(in.shape(), len, sizeof(T));
const auto &tin(iax==0 ? in : out);
multi_iter<vlen> it(tin, out, axes[iax]);
#ifndef POCKETFFT_NO_VECTORS
if (vlen>1)
while (it.remaining()>=vlen)
{
using vtype = typename VTYPE<T>::type;
it.advance(vlen);
auto tdatav = reinterpret_cast<vtype *>(storage.data());
for (size_t i=0; i<len; ++i)
for (size_t j=0; j<vlen; ++j)
tdatav[i][j] = tin[it.iofs(j,i)];
plan->forward(tdatav, fct);
for (size_t j=0; j<vlen; ++j)
out[it.oofs(j,0)] = tdatav[0][j];
size_t i=1, i1=1, i2=len-1;
for (i=1; i<len-1; i+=2, ++i1, --i2)
for (size_t j=0; j<vlen; ++j)
{
out[it.oofs(j,i1)] = tdatav[i][j]+tdatav[i+1][j];
out[it.oofs(j,i2)] = tdatav[i][j]-tdatav[i+1][j];
}
if (i<len)
for (size_t j=0; j<vlen; ++j)
out[it.oofs(j,i1)] = tdatav[i][j];
}
#endif
while (it.remaining()>0)
{
it.advance(1);
auto tdata = reinterpret_cast<T *>(storage.data());
for (size_t i=0; i<len; ++i)
tdata[i] = tin[it.iofs(i)];
plan->forward(tdata, fct);
// Hartley order
out[it.oofs(0)] = tdata[0];
size_t i=1, i1=1, i2=len-1;
for (i=1; i<len-1; i+=2, ++i1, --i2)
{
out[it.oofs(i1)] = tdata[i]+tdata[i+1];
out[it.oofs(i2)] = tdata[i]-tdata[i+1];
}
if (i<len)
out[it.oofs(i1)] = tdata[i];
}
} // end of parallel region
fct = T(1); // factor has been applied, use 1 for remaining axes
}
}
template<typename Trafo, typename T> POCKETFFT_NOINLINE void general_dcst(
const cndarr<T> &in, ndarr<T> &out, const shape_t &axes,
T fct, bool ortho, size_t POCKETFFT_NTHREADS)
{
shared_ptr<Trafo> plan;
for (size_t iax=0; iax<axes.size(); ++iax)
{
constexpr auto vlen = VLEN<T>::val;
size_t len=in.shape(axes[iax]);
if ((!plan) || (len!=plan->length()))
plan = get_plan<Trafo>(len);
#ifdef POCKETFFT_OPENMP
#pragma omp parallel num_threads(util::thread_count(nthreads, in.shape(), axes[iax]))
#endif
{
auto storage = alloc_tmp<T>(in.shape(), len, sizeof(T));
const auto &tin(iax==0 ? in : out);
multi_iter<vlen> it(tin, out, axes[iax]);
#ifndef POCKETFFT_NO_VECTORS
if (vlen>1)
while (it.remaining()>=vlen)
{
using vtype = typename VTYPE<T>::type;
it.advance(vlen);
auto tdatav = reinterpret_cast<vtype *>(storage.data());
for (size_t i=0; i<len; ++i)
for (size_t j=0; j<vlen; ++j)
tdatav[i][j] = tin[it.iofs(j,i)];
plan->exec(tdatav, fct, ortho);
for (size_t i=0; i<len; ++i)
for (size_t j=0; j<vlen; ++j)
out[it.oofs(j,i)] = tdatav[i][j];
}
#endif
while (it.remaining()>0)
{
it.advance(1);
auto tdata = reinterpret_cast<T *>(storage.data());
if ((&tin[0]==&out[0]) && (it.stride_out()==sizeof(T))) // fully in-place
plan->exec(&out[it.oofs(0)], fct, ortho);
else if (it.stride_out()==sizeof(T)) // compute FFT in output location
{
for (size_t i=0; i<len; ++i)
out[it.oofs(i)] = tin[it.iofs(i)];
plan->exec(&out[it.oofs(0)], fct, ortho);
}
else
{
for (size_t i=0; i<len; ++i)
tdata[i] = tin[it.iofs(i)];
plan->exec(tdata, fct, ortho);
for (size_t i=0; i<len; ++i)
out[it.oofs(i)] = tdata[i];
}
}
} // end of parallel region
fct = T(1); // factor has been applied, use 1 for remaining axes
}
}
template<typename T> POCKETFFT_NOINLINE void general_r2c(
const cndarr<T> &in, ndarr<cmplx<T>> &out, size_t axis, bool forward, T fct,
size_t POCKETFFT_NTHREADS)
{
auto plan = get_plan<pocketfft_r<T>>(in.shape(axis));
constexpr auto vlen = VLEN<T>::val;
size_t len=in.shape(axis);
#ifdef POCKETFFT_OPENMP
#pragma omp parallel num_threads(util::thread_count(nthreads, in.shape(), axis))
#endif
{
auto storage = alloc_tmp<T>(in.shape(), len, sizeof(T));
multi_iter<vlen> it(in, out, axis);
#ifndef POCKETFFT_NO_VECTORS
if (vlen>1)
while (it.remaining()>=vlen)
{
using vtype = typename VTYPE<T>::type;
it.advance(vlen);
auto tdatav = reinterpret_cast<vtype *>(storage.data());
for (size_t i=0; i<len; ++i)
for (size_t j=0; j<vlen; ++j)
tdatav[i][j] = in[it.iofs(j,i)];
plan->forward(tdatav, fct);
for (size_t j=0; j<vlen; ++j)
out[it.oofs(j,0)].Set(tdatav[0][j]);
size_t i=1, ii=1;
if (forward)
for (; i<len-1; i+=2, ++ii)
for (size_t j=0; j<vlen; ++j)
out[it.oofs(j,ii)].Set(tdatav[i][j], tdatav[i+1][j]);
else
for (; i<len-1; i+=2, ++ii)
for (size_t j=0; j<vlen; ++j)
out[it.oofs(j,ii)].Set(tdatav[i][j], -tdatav[i+1][j]);
if (i<len)
for (size_t j=0; j<vlen; ++j)
out[it.oofs(j,ii)].Set(tdatav[i][j]);
}
#endif
while (it.remaining()>0)
{
it.advance(1);
auto tdata = reinterpret_cast<T *>(storage.data());
for (size_t i=0; i<len; ++i)
tdata[i] = in[it.iofs(i)];
plan->forward(tdata, fct);
out[it.oofs(0)].Set(tdata[0]);
size_t i=1, ii=1;
if (forward)
for (; i<len-1; i+=2, ++ii)
out[it.oofs(ii)].Set(tdata[i], tdata[i+1]);
else
for (; i<len-1; i+=2, ++ii)
out[it.oofs(ii)].Set(tdata[i], -tdata[i+1]);
if (i<len)
out[it.oofs(ii)].Set(tdata[i]);
}
} // end of parallel region
}
template<typename T> POCKETFFT_NOINLINE void general_c2r(
const cndarr<cmplx<T>> &in, ndarr<T> &out, size_t axis, bool forward, T fct,
size_t POCKETFFT_NTHREADS)
{
auto plan = get_plan<pocketfft_r<T>>(out.shape(axis));
constexpr auto vlen = VLEN<T>::val;
size_t len=out.shape(axis);
#ifdef POCKETFFT_OPENMP
#pragma omp parallel num_threads(util::thread_count(nthreads, in.shape(), axis))
#endif
{
auto storage = alloc_tmp<T>(out.shape(), len, sizeof(T));
multi_iter<vlen> it(in, out, axis);
#ifndef POCKETFFT_NO_VECTORS
if (vlen>1)
while (it.remaining()>=vlen)
{
using vtype = typename VTYPE<T>::type;
it.advance(vlen);
auto tdatav = reinterpret_cast<vtype *>(storage.data());
for (size_t j=0; j<vlen; ++j)
tdatav[0][j]=in[it.iofs(j,0)].r;
{
size_t i=1, ii=1;
if (forward)
for (; i<len-1; i+=2, ++ii)
for (size_t j=0; j<vlen; ++j)
{
tdatav[i ][j] = in[it.iofs(j,ii)].r;
tdatav[i+1][j] = -in[it.iofs(j,ii)].i;
}
else
for (; i<len-1; i+=2, ++ii)
for (size_t j=0; j<vlen; ++j)
{
tdatav[i ][j] = in[it.iofs(j,ii)].r;
tdatav[i+1][j] = in[it.iofs(j,ii)].i;
}
if (i<len)
for (size_t j=0; j<vlen; ++j)
tdatav[i][j] = in[it.iofs(j,ii)].r;
}
plan->backward(tdatav, fct);
for (size_t i=0; i<len; ++i)
for (size_t j=0; j<vlen; ++j)
out[it.oofs(j,i)] = tdatav[i][j];
}
#endif
while (it.remaining()>0)
{
it.advance(1);
auto tdata = reinterpret_cast<T *>(storage.data());
tdata[0]=in[it.iofs(0)].r;
{
size_t i=1, ii=1;
if (forward)
for (; i<len-1; i+=2, ++ii)
{
tdata[i ] = in[it.iofs(ii)].r;
tdata[i+1] = -in[it.iofs(ii)].i;
}
else
for (; i<len-1; i+=2, ++ii)
{
tdata[i ] = in[it.iofs(ii)].r;
tdata[i+1] = in[it.iofs(ii)].i;
}
if (i<len)
tdata[i] = in[it.iofs(ii)].r;
}
plan->backward(tdata, fct);
for (size_t i=0; i<len; ++i)
out[it.oofs(i)] = tdata[i];
}
} // end of parallel region
}
template<typename T> POCKETFFT_NOINLINE void general_r(
const cndarr<T> &in, ndarr<T> &out, const shape_t &axes, bool r2c,
bool forward, T fct, size_t POCKETFFT_NTHREADS)
{
shared_ptr<pocketfft_r<T>> plan;
for (size_t iax=0; iax<axes.size(); ++iax)
{
constexpr auto vlen = VLEN<T>::val;
size_t len=in.shape(axes[iax]);
if ((!plan) || (len!=plan->length()))
plan = get_plan<pocketfft_r<T>>(len);
#ifdef POCKETFFT_OPENMP
#pragma omp parallel num_threads(util::thread_count(nthreads, in.shape(), axes[iax]))
#endif
{
auto storage = alloc_tmp<T>(in.shape(), len, sizeof(T));
const auto &tin(iax==0 ? in : out);
multi_iter<vlen> it(tin, out, axes[iax]);
#ifndef POCKETFFT_NO_VECTORS
if (vlen>1)
while (it.remaining()>=vlen)
{
using vtype = typename VTYPE<T>::type;
it.advance(vlen);
auto tdatav = reinterpret_cast<vtype *>(storage.data());
for (size_t i=0; i<len; ++i)
for (size_t j=0; j<vlen; ++j)
tdatav[i][j] = tin[it.iofs(j,i)];
if ((!r2c) && forward)
for (size_t i=2; i<len; i+=2)
for (size_t j=0; j<vlen; ++j)
tdatav[i][j] = -tdatav[i][j];
forward ? plan->forward (tdatav, fct)
: plan->backward(tdatav, fct);
if (r2c && (!forward))
for (size_t i=2; i<len; i+=2)
for (size_t j=0; j<vlen; ++j)
tdatav[i][j] = -tdatav[i][j];
for (size_t i=0; i<len; ++i)
for (size_t j=0; j<vlen; ++j)
out[it.oofs(j,i)] = tdatav[i][j];
}
#endif
while (it.remaining()>0)
{
it.advance(1);
auto tdata = reinterpret_cast<T *>(storage.data());
if ((&tin[0]==&out[0]) && (it.stride_out()==sizeof(T))) // fully in-place
{
if ((!r2c) && forward)
for (size_t i=2; i<len; i+=2)
out[it.oofs(i)] = -out[it.oofs(i)];
forward ? plan->forward (&out[it.oofs(0)], fct)
: plan->backward(&out[it.oofs(0)], fct);
if (r2c && (!forward))
for (size_t i=2; i<len; i+=2)
out[it.oofs(i)] = -out[it.oofs(i)];
}
else if (it.stride_out()==sizeof(T)) // compute FFT in output location
{
for (size_t i=0; i<len; ++i)
out[it.oofs(i)] = tin[it.iofs(i)];
if ((!r2c) && forward)
for (size_t i=2; i<len; i+=2)
out[it.oofs(i)] = -out[it.oofs(i)];
forward ? plan->forward (&out[it.oofs(0)], fct)
: plan->backward(&out[it.oofs(0)], fct);
if (r2c && (!forward))
for (size_t i=2; i<len; i+=2)
out[it.oofs(i)] = -out[it.oofs(i)];
}
else
{
for (size_t i=0; i<len; ++i)
tdata[i] = tin[it.iofs(i)];
if ((!r2c) && forward)
for (size_t i=2; i<len; i+=2)
tdata[i] = -tdata[i];
forward ? plan->forward (tdata, fct) : plan->backward(tdata, fct);
if (r2c && (!forward))
for (size_t i=2; i<len; i+=2)
tdata[i] = -tdata[i];
for (size_t i=0; i<len; ++i)
out[it.oofs(i)] = tdata[i];
}
}
} // end of parallel region
fct = T(1); // factor has been applied, use 1 for remaining axes
}
}
#undef POCKETFFT_NTHREADS
template<typename T> void c2c(const shape_t &shape, const stride_t &stride_in,
const stride_t &stride_out, const shape_t &axes, bool forward,
const complex<T> *data_in, complex<T> *data_out, T fct,
size_t nthreads=1)
{
if (util::prod(shape)==0) return;
util::sanity_check(shape, stride_in, stride_out, data_in==data_out, axes);
cndarr<cmplx<T>> ain(data_in, shape, stride_in);
ndarr<cmplx<T>> aout(data_out, shape, stride_out);
general_c(ain, aout, axes, forward, fct, nthreads);
}
template<typename T> void dct(const shape_t &shape,
const stride_t &stride_in, const stride_t &stride_out, const shape_t &axes,
int type, const T *data_in, T *data_out, T fct, bool ortho, size_t nthreads=1)
{
if ((type<1) || (type>4)) throw invalid_argument("invalid DCT type");
if (util::prod(shape)==0) return;
util::sanity_check(shape, stride_in, stride_out, data_in==data_out, axes);
cndarr<T> ain(data_in, shape, stride_in);
ndarr<T> aout(data_out, shape, stride_out);
if (type==1)
general_dcst<T_dct1<T>>(ain, aout, axes, fct, ortho, nthreads);
else if (type==2)
general_dcst<T_dct2<T>>(ain, aout, axes, fct, ortho, nthreads);
else if (type==3)
general_dcst<T_dct3<T>>(ain, aout, axes, fct, ortho, nthreads);
else if (type==4)
general_dcst<T_dct4<T>>(ain, aout, axes, fct, ortho, nthreads);
else
throw runtime_error("unsupported DCT type");
}
template<typename T> void dst(const shape_t &shape,
const stride_t &stride_in, const stride_t &stride_out, const shape_t &axes,
int type, const T *data_in, T *data_out, T fct, bool ortho, size_t nthreads=1)
{
if ((type<1) || (type>4)) throw invalid_argument("invalid DST type");
if (util::prod(shape)==0) return;
util::sanity_check(shape, stride_in, stride_out, data_in==data_out, axes);
cndarr<T> ain(data_in, shape, stride_in);
ndarr<T> aout(data_out, shape, stride_out);
if (type==1)
general_dcst<T_dst1<T>>(ain, aout, axes, fct, ortho, nthreads);
else if (type==2)
general_dcst<T_dst2<T>>(ain, aout, axes, fct, ortho, nthreads);
else if (type==3)
general_dcst<T_dst3<T>>(ain, aout, axes, fct, ortho, nthreads);
else if (type==4)
general_dcst<T_dst4<T>>(ain, aout, axes, fct, ortho, nthreads);
else
throw runtime_error("unsupported DST type");
}
template<typename T> void r2c(const shape_t &shape_in,
const stride_t &stride_in, const stride_t &stride_out, size_t axis,
bool forward, const T *data_in, complex<T> *data_out, T fct,
size_t nthreads=1)
{
if (util::prod(shape_in)==0) return;
util::sanity_check(shape_in, stride_in, stride_out, false, axis);
cndarr<T> ain(data_in, shape_in, stride_in);
shape_t shape_out(shape_in);
shape_out[axis] = shape_in[axis]/2 + 1;
ndarr<cmplx<T>> aout(data_out, shape_out, stride_out);
general_r2c(ain, aout, axis, forward, fct, nthreads);
}
template<typename T> void r2c(const shape_t &shape_in,
const stride_t &stride_in, const stride_t &stride_out, const shape_t &axes,
bool forward, const T *data_in, complex<T> *data_out, T fct,
size_t nthreads=1)
{
if (util::prod(shape_in)==0) return;
util::sanity_check(shape_in, stride_in, stride_out, false, axes);
r2c(shape_in, stride_in, stride_out, axes.back(), forward, data_in, data_out,
fct, nthreads);
if (axes.size()==1) return;
shape_t shape_out(shape_in);
shape_out[axes.back()] = shape_in[axes.back()]/2 + 1;
auto newaxes = shape_t{axes.begin(), --axes.end()};
c2c(shape_out, stride_out, stride_out, newaxes, forward, data_out, data_out,
T(1), nthreads);
}
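// Note: the multi-axis r2c above factorizes the transform as an r2c along the
// last axis (yielding the half-size complex output) followed by a full c2c
// over the remaining axes of that output; the multi-axis c2r overload further
// below mirrors this, running c2c over the leading axes into a temporary
// buffer before the final c2r along the last axis.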
template<typename T> void c2r(const shape_t &shape_out,
const stride_t &stride_in, const stride_t &stride_out, size_t axis,
bool forward, const complex<T> *data_in, T *data_out, T fct,
size_t nthreads=1)
{
if (util::prod(shape_out)==0) return;
util::sanity_check(shape_out, stride_in, stride_out, false, axis);
shape_t shape_in(shape_out);
shape_in[axis] = shape_out[axis]/2 + 1;
cndarr<cmplx<T>> ain(data_in, shape_in, stride_in);
ndarr<T> aout(data_out, shape_out, stride_out);
general_c2r(ain, aout, axis, forward, fct, nthreads);
}
template<typename T> void c2r(const shape_t &shape_out,
const stride_t &stride_in, const stride_t &stride_out, const shape_t &axes,
bool forward, const complex<T> *data_in, T *data_out, T fct,
size_t nthreads=1)
{
if (util::prod(shape_out)==0) return;
if (axes.size()==1)
return c2r(shape_out, stride_in, stride_out, axes[0], forward,
data_in, data_out, fct, nthreads);
util::sanity_check(shape_out, stride_in, stride_out, false, axes);
auto shape_in = shape_out;
shape_in[axes.back()] = shape_out[axes.back()]/2 + 1;
auto nval = util::prod(shape_in);
stride_t stride_inter(shape_in.size());
stride_inter.back() = sizeof(cmplx<T>);
for (int i=int(shape_in.size())-2; i>=0; --i)
stride_inter[size_t(i)] =
stride_inter[size_t(i+1)]*ptrdiff_t(shape_in[size_t(i+1)]);
arr<complex<T>> tmp(nval);
auto newaxes = shape_t({axes.begin(), --axes.end()});
c2c(shape_in, stride_in, stride_inter, newaxes, forward, data_in, tmp.data(),
T(1), nthreads);
c2r(shape_out, stride_inter, stride_out, axes.back(), forward,
tmp.data(), data_out, fct, nthreads);
}
template<typename T> void r2r_fftpack(const shape_t &shape,
const stride_t &stride_in, const stride_t &stride_out, const shape_t &axes,
bool real2hermitian, bool forward, const T *data_in, T *data_out, T fct,
size_t nthreads=1)
{
if (util::prod(shape)==0) return;
util::sanity_check(shape, stride_in, stride_out, data_in==data_out, axes);
cndarr<T> ain(data_in, shape, stride_in);
ndarr<T> aout(data_out, shape, stride_out);
general_r(ain, aout, axes, real2hermitian, forward, fct, nthreads);
}
template<typename T> void r2r_separable_hartley(const shape_t &shape,
const stride_t &stride_in, const stride_t &stride_out, const shape_t &axes,
const T *data_in, T *data_out, T fct, size_t nthreads=1)
{
if (util::prod(shape)==0) return;
util::sanity_check(shape, stride_in, stride_out, data_in==data_out, axes);
cndarr<T> ain(data_in, shape, stride_in);
ndarr<T> aout(data_out, shape, stride_out);
general_hartley(ain, aout, axes, fct, nthreads);
}
} // namespace detail
using detail::FORWARD;
using detail::BACKWARD;
using detail::shape_t;
using detail::stride_t;
using detail::c2c;
using detail::c2r;
using detail::r2c;
using detail::r2r_fftpack;
using detail::r2r_separable_hartley;
using detail::dct;
using detail::dst;
} // namespace pocketfft
#undef POCKETFFT_NOINLINE
#undef POCKETFFT_RESTRICT
#endif // POCKETFFT_HDRONLY_H
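// Usage sketch (illustrative, not part of the library): a 1-D forward
// complex-to-complex transform through the public c2c() entry point above.
// Shapes are in elements and strides in bytes, matching the conventions the
// sanity checks assume; the header filename below is an assumption.
//
//   #include <complex>
//   #include <vector>
//   #include "pocketfft_hdronly.h"  // assumed filename for this header
//
//   int main()
//     {
//     using namespace pocketfft;
//     constexpr size_t n = 16;
//     std::vector<std::complex<double>> in(n, {1.0, 0.0}), out(n);
//     shape_t shape{n};
//     stride_t stride{sizeof(std::complex<double>)};  // byte strides
//     shape_t axes{0};
//     c2c(shape, stride, stride, axes, FORWARD, in.data(), out.data(), 1.0);
//     return 0;
//     }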
|
ex03.c | #include <stdio.h>
#include <omp.h>
#include <stdlib.h> /* for malloc/free */
static long num_steps = 1000000;
double step;
#define PAD 8 // Assuming 64 Byte L1 cache line
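// Each thread accumulates into its own slot sum[id * PAD]; padding the slots
// to one cache line apiece (8 doubles * 8 bytes = 64 bytes) keeps concurrent
// updates from false-sharing the same line.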
int main(int argc, char* argv[])
{
int num_threads;
double pi, total_sum = 0.0;
step = 1.0 / (double) num_steps;
int num_procs = omp_get_num_procs();
// omp_set_num_threads(num_procs);
double* sum;
int steps_per_thread;
// int num_threads = omp_get_num_threads(); // Sequential section always returns 1 thread -> Move to parallel section
double startTime = omp_get_wtime();
#pragma omp parallel
{
#pragma omp single
{
num_threads = omp_get_num_threads();
steps_per_thread = num_steps / num_threads;
sum = (double*) calloc(num_threads * PAD, sizeof(double)); /* zero-initialize partial sums */
printf ("Found %d CPUs. Using %d threads and computing %d steps per thread.\n", num_procs, num_threads, steps_per_thread);
// Implicit barrier at the end
}
int i, id = omp_get_thread_num();
printf("Executing thread %d out of %d\n", id, num_threads);
double x;
long last = (id == num_threads - 1) ? num_steps : (long) (id + 1) * steps_per_thread; /* last thread also takes the remainder */
for (i = id * steps_per_thread; i < last; i++)
{
x = (i + 0.5) * step;
sum[id * PAD] += 4.0 / (1.0 + x * x);
}
}
int i;
for (i = 0; i < num_threads; i++) /* sum over threads actually used, not processors */
total_sum += sum[i * PAD];
pi = step * total_sum;
double endTime = omp_get_wtime();
printf ("Computed integral: %f\n", pi);
printf ("Time elapsed: %f secs\n", (endTime - startTime));
free(sum);
return 0;
}
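/* For comparison, a minimal sketch (not part of the original exercise) of the
 * same integration using OpenMP's reduction clause, which removes both the
 * padded per-thread array and the manual partial-sum bookkeeping. */
static double pi_reduction(long steps)
{
    double x, sum = 0.0, h = 1.0 / (double) steps;
    long i;
    #pragma omp parallel for private(x) reduction(+:sum)
    for (i = 0; i < steps; i++)
    {
        x = (i + 0.5) * h;
        sum += 4.0 / (1.0 + x * x);
    }
    return h * sum;
}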
|
3d25pt_var.lbpar.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Order-4, 3D 25 point stencil with axis-symmetric variable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
/* Perform the carry for the later subtraction by updating y. */
if (x->tv_usec < y->tv_usec)
{
int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
y->tv_usec -= 1000000 * nsec;
y->tv_sec += nsec;
}
if (x->tv_usec - y->tv_usec > 1000000)
{
int nsec = (x->tv_usec - y->tv_usec) / 1000000;
y->tv_usec += 1000000 * nsec;
y->tv_sec -= nsec;
}
/* Compute the time remaining to wait.
* tv_usec is certainly positive.
*/
result->tv_sec = x->tv_sec - y->tv_sec;
result->tv_usec = x->tv_usec - y->tv_usec;
/* Return 1 if result is negative. */
return x->tv_sec < y->tv_sec;
}
int main(int argc, char *argv[])
{
int t, i, j, k, m, test;
int Nx, Ny, Nz, Nt;
if (argc < 5) { /* all four sizes are required; avoid using them uninitialized */
printf("Usage: %s Nx Ny Nz Nt\n", argv[0]);
return 1;
}
Nx = atoi(argv[1])+8;
Ny = atoi(argv[2])+8;
Nz = atoi(argv[3])+8;
Nt = atoi(argv[4]);
// allocate the arrays
double ****A = (double ****) malloc(sizeof(double***)*2);
for(m=0; m<2;m++){
A[m] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[m][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
}
double ****coef = (double ****) malloc(sizeof(double***)*13);
for(m=0; m<13;m++){
coef[m] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 16;
tile_size[1] = 16;
tile_size[2] = 32;
tile_size[3] = 128;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
// initialize variables
//
srand(42);
for (i = 1; i < Nz; i++) {
for (j = 1; j < Ny; j++) {
for (k = 1; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
for (m=0; m<13; m++) {
for (i=1; i<Nz; i++) {
for (j=1; j<Ny; j++) {
for (k=1; k<Nx; k++) {
coef[m][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
int t1, t2, t3, t4, t5, t6, t7, t8;
int lb, ub, lbp, ubp, lb2, ub2;
register int lbv, ubv;
/* Start of CLooG code */
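/* The nest below is a PLUTO/CLooG-generated tiling of the 4-D (time x space)
 * iteration domain: t1 walks time-tile wavefronts, t2 is the OpenMP-parallel
 * tile dimension, t3/t4 tile the remaining spatial axes, and t5..t8 enumerate
 * the points inside each tile (t5 is the time step, t6..t8 the z/y/x points,
 * shifted by 4*t5 to realize the time skewing). */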
if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) {
for (t1=-1;t1<=floord(Nt-1,2);t1++) {
lbp=max(ceild(t1,2),ceild(4*t1-Nt+2,4));
ubp=min(floord(4*Nt+Nz-9,16),floord(8*t1+Nz+2,16));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
for (t2=lbp;t2<=ubp;t2++) {
for (t3=max(max(0,ceild(t1-3,4)),ceild(16*t2-Nz-19,32));t3<=min(min(min(floord(4*Nt+Ny-9,32),floord(8*t1+Ny+7,32)),floord(16*t2+Ny+3,32)),floord(16*t1-16*t2+Nz+Ny+5,32));t3++) {
for (t4=max(max(max(0,ceild(t1-15,16)),ceild(16*t2-Nz-115,128)),ceild(32*t3-Ny-115,128));t4<=min(min(min(min(floord(4*Nt+Nx-9,128),floord(8*t1+Nx+7,128)),floord(16*t2+Nx+3,128)),floord(32*t3+Nx+19,128)),floord(16*t1-16*t2+Nz+Nx+5,128));t4++) {
for (t5=max(max(max(max(max(0,ceild(16*t2-Nz+5,4)),ceild(32*t3-Ny+5,4)),ceild(128*t4-Nx+5,4)),2*t1),4*t1-4*t2+1);t5<=min(min(min(min(min(floord(16*t1-16*t2+Nz+10,4),Nt-1),2*t1+3),4*t2+2),8*t3+6),32*t4+30);t5++) {
for (t6=max(max(16*t2,4*t5+4),-16*t1+16*t2+8*t5-15);t6<=min(min(16*t2+15,-16*t1+16*t2+8*t5),4*t5+Nz-5);t6++) {
for (t7=max(32*t3,4*t5+4);t7<=min(32*t3+31,4*t5+Ny-5);t7++) {
lbv=max(128*t4,4*t5+4);
ubv=min(128*t4+127,4*t5+Nx-5);
#pragma ivdep
#pragma vector always
for (t8=lbv;t8<=ubv;t8++) {
A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] = (((((((((((((coef[0][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (coef[1][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 1][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 1][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 1][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 1][ (-4*t5+t8)]))) + (coef[3][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 1] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 1]))) + (coef[4][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 2][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 2][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[5][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 2][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 2][ (-4*t5+t8)]))) + (coef[6][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 2] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 2]))) + (coef[7][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 3][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 3][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[8][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 3][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 3][ (-4*t5+t8)]))) + (coef[9][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 3] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 3]))) + (coef[10][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 4][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 4][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[11][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 4][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 4][ (-4*t5+t8)]))) + (coef[12][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 4] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 4])));;
}
}
}
}
}
}
}
}
}
/* End of CLooG code */
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = min(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(4, "variable axis-symmetric")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
}
free(A[0][i]);
free(A[1][i]);
}
free(A[0]);
free(A[1]);
for(m=0; m<13;m++){
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(coef[m][i][j]);
}
free(coef[m][i]);
}
free(coef[m]);
}
return 0;
}
|
nvector_openmpdev.c | /* -----------------------------------------------------------------
* Programmer(s): David J. Gardner and Shelby Lockhart @ LLNL
* -----------------------------------------------------------------
* Acknowledgements: This NVECTOR module is based on the NVECTOR
* Serial module by Scott D. Cohen, Alan C.
* Hindmarsh, Radu Serban, and Aaron Collier
* @ LLNL
* -----------------------------------------------------------------
* SUNDIALS Copyright Start
* Copyright (c) 2002-2021, Lawrence Livermore National Security
* and Southern Methodist University.
* All rights reserved.
*
* See the top-level LICENSE and NOTICE files for details.
*
* SPDX-License-Identifier: BSD-3-Clause
* SUNDIALS Copyright End
* -----------------------------------------------------------------
* This is the implementation file for an OpenMP DEV implementation
* of the NVECTOR module.
* -----------------------------------------------------------------*/
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#include <nvector/nvector_openmpdev.h>
#include <sundials/sundials_math.h>
#define ZERO RCONST(0.0)
#define HALF RCONST(0.5)
#define ONE RCONST(1.0)
#define ONEPT5 RCONST(1.5)
/* Private functions for special cases of vector operations */
static void VCopy_OpenMPDEV(N_Vector x, N_Vector z); /* z=x */
static void VSum_OpenMPDEV(N_Vector x, N_Vector y, N_Vector z); /* z=x+y */
static void VDiff_OpenMPDEV(N_Vector x, N_Vector y, N_Vector z); /* z=x-y */
static void VNeg_OpenMPDEV(N_Vector x, N_Vector z); /* z=-x */
static void VScaleSum_OpenMPDEV(realtype c, N_Vector x, N_Vector y, N_Vector z); /* z=c(x+y) */
static void VScaleDiff_OpenMPDEV(realtype c, N_Vector x, N_Vector y, N_Vector z); /* z=c(x-y) */
static void VLin1_OpenMPDEV(realtype a, N_Vector x, N_Vector y, N_Vector z); /* z=ax+y */
static void VLin2_OpenMPDEV(realtype a, N_Vector x, N_Vector y, N_Vector z); /* z=ax-y */
static void Vaxpy_OpenMPDEV(realtype a, N_Vector x, N_Vector y); /* y <- ax+y */
static void VScaleBy_OpenMPDEV(realtype a, N_Vector x); /* x <- ax */
/* Private functions for special cases of vector array operations */
static int VSumVectorArray_OpenMPDEV(int nvec, N_Vector* X, N_Vector* Y, N_Vector* Z); /* Z=X+Y */
static int VDiffVectorArray_OpenMPDEV(int nvec, N_Vector* X, N_Vector* Y, N_Vector* Z); /* Z=X-Y */
static int VScaleSumVectorArray_OpenMPDEV(int nvec, realtype c, N_Vector* X, N_Vector* Y, N_Vector* Z); /* Z=c(X+Y) */
static int VScaleDiffVectorArray_OpenMPDEV(int nvec, realtype c, N_Vector* X, N_Vector* Y, N_Vector* Z); /* Z=c(X-Y) */
static int VLin1VectorArray_OpenMPDEV(int nvec, realtype a, N_Vector* X, N_Vector* Y, N_Vector* Z); /* Z=aX+Y */
static int VLin2VectorArray_OpenMPDEV(int nvec, realtype a, N_Vector* X, N_Vector* Y, N_Vector* Z); /* Z=aX-Y */
static int VaxpyVectorArray_OpenMPDEV(int nvec, realtype a, N_Vector* X, N_Vector* Y); /* Y <- aX+Y */
/*
* -----------------------------------------------------------------
* exported functions
* -----------------------------------------------------------------
*/
/* ----------------------------------------------------------------
* Returns vector type ID. Used to identify vector implementation
* from abstract N_Vector interface.
*/
N_Vector_ID N_VGetVectorID_OpenMPDEV(N_Vector v)
{
return SUNDIALS_NVEC_OPENMPDEV;
}
/* ----------------------------------------------------------------------------
* Function to create a new empty vector
*/
N_Vector N_VNewEmpty_OpenMPDEV(sunindextype length)
{
N_Vector v;
N_VectorContent_OpenMPDEV content;
/* Create an empty vector object */
v = NULL;
v = N_VNewEmpty();
if (v == NULL) return(NULL);
/* Attach operations */
/* constructors, destructors, and utility operations */
v->ops->nvgetvectorid = N_VGetVectorID_OpenMPDEV;
v->ops->nvclone = N_VClone_OpenMPDEV;
v->ops->nvcloneempty = N_VCloneEmpty_OpenMPDEV;
v->ops->nvdestroy = N_VDestroy_OpenMPDEV;
v->ops->nvspace = N_VSpace_OpenMPDEV;
v->ops->nvgetlength = N_VGetLength_OpenMPDEV;
v->ops->nvgetarraypointer = N_VGetHostArrayPointer_OpenMPDEV;
v->ops->nvgetdevicearraypointer = N_VGetDeviceArrayPointer_OpenMPDEV;
v->ops->nvprint = N_VPrint_OpenMPDEV;
v->ops->nvprintfile = N_VPrintFile_OpenMPDEV;
/* standard vector operations */
v->ops->nvlinearsum = N_VLinearSum_OpenMPDEV;
v->ops->nvconst = N_VConst_OpenMPDEV;
v->ops->nvprod = N_VProd_OpenMPDEV;
v->ops->nvdiv = N_VDiv_OpenMPDEV;
v->ops->nvscale = N_VScale_OpenMPDEV;
v->ops->nvabs = N_VAbs_OpenMPDEV;
v->ops->nvinv = N_VInv_OpenMPDEV;
v->ops->nvaddconst = N_VAddConst_OpenMPDEV;
v->ops->nvdotprod = N_VDotProd_OpenMPDEV;
v->ops->nvmaxnorm = N_VMaxNorm_OpenMPDEV;
v->ops->nvwrmsnormmask = N_VWrmsNormMask_OpenMPDEV;
v->ops->nvwrmsnorm = N_VWrmsNorm_OpenMPDEV;
v->ops->nvmin = N_VMin_OpenMPDEV;
v->ops->nvwl2norm = N_VWL2Norm_OpenMPDEV;
v->ops->nvl1norm = N_VL1Norm_OpenMPDEV;
v->ops->nvcompare = N_VCompare_OpenMPDEV;
v->ops->nvinvtest = N_VInvTest_OpenMPDEV;
v->ops->nvconstrmask = N_VConstrMask_OpenMPDEV;
v->ops->nvminquotient = N_VMinQuotient_OpenMPDEV;
/* fused and vector array operations are disabled (NULL) by default */
/* local reduction operations */
v->ops->nvdotprodlocal = N_VDotProd_OpenMPDEV;
v->ops->nvmaxnormlocal = N_VMaxNorm_OpenMPDEV;
v->ops->nvminlocal = N_VMin_OpenMPDEV;
v->ops->nvl1normlocal = N_VL1Norm_OpenMPDEV;
v->ops->nvinvtestlocal = N_VInvTest_OpenMPDEV;
v->ops->nvconstrmasklocal = N_VConstrMask_OpenMPDEV;
v->ops->nvminquotientlocal = N_VMinQuotient_OpenMPDEV;
v->ops->nvwsqrsumlocal = N_VWSqrSumLocal_OpenMPDEV;
v->ops->nvwsqrsummasklocal = N_VWSqrSumMaskLocal_OpenMPDEV;
/* Create content */
content = NULL;
content = (N_VectorContent_OpenMPDEV) malloc(sizeof *content);
if (content == NULL) { N_VDestroy(v); return(NULL); }
/* Attach content */
v->content = content;
/* Initialize content */
content->length = length;
content->own_data = SUNFALSE;
content->host_data = NULL;
content->dev_data = NULL;
return(v);
}
/* ----------------------------------------------------------------------------
* Function to create a new vector
*/
N_Vector N_VNew_OpenMPDEV(sunindextype length)
{
N_Vector v;
realtype *data;
realtype *dev_data;
int dev;
v = NULL;
v = N_VNewEmpty_OpenMPDEV(length);
if (v == NULL) return(NULL);
/* Create data */
if (length > 0) {
/* Update ownership */
NV_OWN_DATA_OMPDEV(v) = SUNTRUE;
/* Allocate memory on host */
data = NULL;
data = (realtype *) malloc(length * sizeof(realtype));
if (data == NULL) { N_VDestroy(v); return(NULL); }
/* Allocate memory on device */
dev = omp_get_default_device();
dev_data = omp_target_alloc(length * sizeof(realtype), dev);
if (dev_data == NULL) { N_VDestroy(v); return(NULL); }
/* Attach data */
NV_DATA_HOST_OMPDEV(v) = data;
NV_DATA_DEV_OMPDEV(v) = dev_data;
}
return(v);
}
/* ----------------------------------------------------------------------------
* Function to create a vector with user data component
*/
N_Vector N_VMake_OpenMPDEV(sunindextype length, realtype *h_vdata,
realtype *d_vdata)
{
N_Vector v;
int dev, host;
if (h_vdata == NULL || d_vdata == NULL) return(NULL);
v = NULL;
v = N_VNewEmpty_OpenMPDEV(length);
if (v == NULL) return(NULL);
if (length > 0) {
/* Get device and host identifiers */
dev = omp_get_default_device();
host = omp_get_initial_device();
/* Attach data */
NV_OWN_DATA_OMPDEV(v) = SUNFALSE;
NV_DATA_HOST_OMPDEV(v) = h_vdata;
NV_DATA_DEV_OMPDEV(v) = d_vdata;
}
return(v);
}
/* ----------------------------------------------------------------------------
* Function to create an array of new vectors.
*/
N_Vector *N_VCloneVectorArray_OpenMPDEV(int count, N_Vector w)
{
N_Vector *vs;
int j;
if (count <= 0) return(NULL);
vs = NULL;
vs = (N_Vector *) malloc(count * sizeof(N_Vector));
if(vs == NULL) return(NULL);
for (j = 0; j < count; j++) {
vs[j] = NULL;
vs[j] = N_VClone_OpenMPDEV(w);
if (vs[j] == NULL) {
N_VDestroyVectorArray_OpenMPDEV(vs, j); /* j vectors were created successfully */
return(NULL);
}
}
return(vs);
}
/* ----------------------------------------------------------------------------
* Function to create an array of new vectors with NULL data array.
*/
N_Vector *N_VCloneVectorArrayEmpty_OpenMPDEV(int count, N_Vector w)
{
N_Vector *vs;
int j;
if (count <= 0) return(NULL);
vs = NULL;
vs = (N_Vector *) malloc(count * sizeof(N_Vector));
if(vs == NULL) return(NULL);
for (j = 0; j < count; j++) {
vs[j] = NULL;
vs[j] = N_VCloneEmpty_OpenMPDEV(w);
if (vs[j] == NULL) {
N_VDestroyVectorArray_OpenMPDEV(vs, j); /* j vectors were created successfully */
return(NULL);
}
}
return(vs);
}
/* ----------------------------------------------------------------------------
* Function to free an array created with N_VCloneVectorArray_OpenMPDEV
*/
void N_VDestroyVectorArray_OpenMPDEV(N_Vector *vs, int count)
{
int j;
for (j = 0; j < count; j++) N_VDestroy_OpenMPDEV(vs[j]);
free(vs); vs = NULL;
return;
}
/* ----------------------------------------------------------------------------
* Function to return number of vector elements
*/
sunindextype N_VGetLength_OpenMPDEV(N_Vector v)
{
return NV_LENGTH_OMPDEV(v);
}
/* ----------------------------------------------------------------------------
* Function to return a pointer to the data array on the host.
*/
realtype *N_VGetHostArrayPointer_OpenMPDEV(N_Vector v)
{
return((realtype *) NV_DATA_HOST_OMPDEV(v));
}
/* ----------------------------------------------------------------------------
* Function to return a pointer to the data array on the device.
*/
realtype *N_VGetDeviceArrayPointer_OpenMPDEV(N_Vector v)
{
return((realtype *) NV_DATA_DEV_OMPDEV(v));
}
/* ----------------------------------------------------------------------------
* Function to print a vector to stdout
*/
void N_VPrint_OpenMPDEV(N_Vector x)
{
N_VPrintFile_OpenMPDEV(x, stdout);
}
/* ----------------------------------------------------------------------------
* Function to print a vector to outfile
*/
void N_VPrintFile_OpenMPDEV(N_Vector x, FILE *outfile)
{
sunindextype i, N;
realtype *xd;
xd = NULL;
N = NV_LENGTH_OMPDEV(x);
xd = NV_DATA_HOST_OMPDEV(x);
for (i = 0; i < N; i++) {
#if defined(SUNDIALS_EXTENDED_PRECISION)
fprintf(outfile, "%11.8Lg\n", xd[i]);
#elif defined(SUNDIALS_DOUBLE_PRECISION)
fprintf(outfile, "%11.8g\n", xd[i]);
#else
fprintf(outfile, "%11.8g\n", xd[i]);
#endif
}
fprintf(outfile, "\n");
return;
}
/* ----------------------------------------------------------------------------
* Function to copy host array into device array
*/
void N_VCopyToDevice_OpenMPDEV(N_Vector x)
{
int dev, host;
sunindextype length;
realtype *host_ptr;
realtype *dev_ptr;
/* Get array information */
length = NV_LENGTH_OMPDEV(x);
host_ptr = NV_DATA_HOST_OMPDEV(x);
dev_ptr = NV_DATA_DEV_OMPDEV(x);
/* Get device and host identifiers */
dev = omp_get_default_device();
host = omp_get_initial_device();
/* Copy array from host to device */
omp_target_memcpy(dev_ptr, host_ptr, sizeof(realtype) * length, 0, 0, dev, host);
return;
}
/* ----------------------------------------------------------------------------
* Function to copy device array into host array
*/
void N_VCopyFromDevice_OpenMPDEV(N_Vector x)
{
int dev, host;
sunindextype length;
realtype *host_ptr;
realtype *dev_ptr;
/* Get array information */
length = NV_LENGTH_OMPDEV(x);
host_ptr = NV_DATA_HOST_OMPDEV(x);
dev_ptr = NV_DATA_DEV_OMPDEV(x);
/* Get device and host identifiers */
dev = omp_get_default_device();
host = omp_get_initial_device();
/* Copy array from device to host */
omp_target_memcpy(host_ptr, dev_ptr, sizeof(realtype) * length, 0, 0, host, dev);
return;
}
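/* ----------------------------------------------------------------------------
 * Illustrative usage sketch (an addition, not part of the SUNDIALS sources):
 * the typical host/device round trip with this module. Error checking omitted.
 */
static void example_roundtrip_OpenMPDEV(void)
{
  sunindextype i, n = 100;
  N_Vector x = N_VNew_OpenMPDEV(n);
  realtype *xh = N_VGetHostArrayPointer_OpenMPDEV(x);
  for (i = 0; i < n; i++) xh[i] = (realtype) i; /* fill on the host */
  N_VCopyToDevice_OpenMPDEV(x);                 /* host -> device */
  N_VScale_OpenMPDEV(RCONST(2.0), x, x);        /* operates on device data */
  N_VCopyFromDevice_OpenMPDEV(x);               /* device -> host */
  N_VDestroy_OpenMPDEV(x);
}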
/*
* -----------------------------------------------------------------
* implementation of vector operations
* -----------------------------------------------------------------
*/
/* ----------------------------------------------------------------------------
* Create new vector from existing vector without attaching data
*/
N_Vector N_VCloneEmpty_OpenMPDEV(N_Vector w)
{
N_Vector v;
N_VectorContent_OpenMPDEV content;
if (w == NULL) return(NULL);
/* Create vector */
v = NULL;
v = N_VNewEmpty();
if (v == NULL) return(NULL);
/* Attach operations */
if (N_VCopyOps(w, v)) { N_VDestroy(v); return(NULL); }
/* Create content */
content = NULL;
content = (N_VectorContent_OpenMPDEV) malloc(sizeof *content);
if (content == NULL) { N_VDestroy(v); return(NULL); }
/* Attach content */
v->content = content;
/* Initialize content */
content->length = NV_LENGTH_OMPDEV(w);
content->own_data = SUNFALSE;
content->host_data = NULL;
content->dev_data = NULL;
return(v);
}
/* ----------------------------------------------------------------------------
* Create new vector from existing vector and attach data
*/
N_Vector N_VClone_OpenMPDEV(N_Vector w)
{
N_Vector v;
realtype *data;
realtype *dev_data;
sunindextype length;
int dev;
v = NULL;
v = N_VCloneEmpty_OpenMPDEV(w);
if (v == NULL) return(NULL);
length = NV_LENGTH_OMPDEV(w);
/* Create data */
if (length > 0) {
/* Update ownership flag */
NV_OWN_DATA_OMPDEV(v) = SUNTRUE;
/* Allocate memory on host */
data = NULL;
data = (realtype *) malloc(length * sizeof(realtype));
if (data == NULL) { N_VDestroy(v); return(NULL); }
/* Allocate memory on device */
dev = omp_get_default_device();
dev_data = omp_target_alloc(length * sizeof(realtype), dev);
if (dev_data == NULL) { N_VDestroy(v); return(NULL); }
/* Attach data */
NV_DATA_HOST_OMPDEV(v)= data;
NV_DATA_DEV_OMPDEV(v) = dev_data;
}
return(v);
}
/* ----------------------------------------------------------------------------
* Destroy vector and free vector memory
*/
void N_VDestroy_OpenMPDEV(N_Vector v)
{
int dev;
if (v == NULL) return;
/* free content */
if (v->content != NULL) {
/* free data arrays if they are owned by the vector */
if (NV_OWN_DATA_OMPDEV(v)) {
if (NV_DATA_HOST_OMPDEV(v) != NULL) {
free(NV_DATA_HOST_OMPDEV(v));
NV_DATA_HOST_OMPDEV(v) = NULL;
}
if (NV_DATA_DEV_OMPDEV(v) != NULL) {
dev = omp_get_default_device();
omp_target_free(NV_DATA_DEV_OMPDEV(v), dev);
NV_DATA_DEV_OMPDEV(v) = NULL;
}
}
free(v->content);
v->content = NULL;
}
/* free ops and vector */
if (v->ops != NULL) { free(v->ops); v->ops = NULL; }
free(v); v = NULL;
return;
}
/* ----------------------------------------------------------------------------
* Get storage requirement for N_Vector
*/
void N_VSpace_OpenMPDEV(N_Vector v, sunindextype *lrw, sunindextype *liw)
{
*lrw = NV_LENGTH_OMPDEV(v);
*liw = 1;
return;
}
/* ----------------------------------------------------------------------------
* Compute linear combination z[i] = a*x[i]+b*y[i]
*/
void N_VLinearSum_OpenMPDEV(realtype a, N_Vector x, realtype b, N_Vector y, N_Vector z)
{
sunindextype i, N;
realtype c, *xd_dev, *yd_dev, *zd_dev;
N_Vector v1, v2;
booleantype test;
int dev;
xd_dev = yd_dev = zd_dev = NULL;
if ((b == ONE) && (z == y)) { /* BLAS usage: axpy y <- ax+y */
Vaxpy_OpenMPDEV(a,x,y);
return;
}
if ((a == ONE) && (z == x)) { /* BLAS usage: axpy x <- by+x */
Vaxpy_OpenMPDEV(b,y,x);
return;
}
/* Case: a == b == 1.0 */
if ((a == ONE) && (b == ONE)) {
VSum_OpenMPDEV(x, y, z);
return;
}
/* Cases: (1) a == 1.0, b = -1.0, (2) a == -1.0, b == 1.0 */
if ((test = ((a == ONE) && (b == -ONE))) || ((a == -ONE) && (b == ONE))) {
v1 = test ? y : x;
v2 = test ? x : y;
VDiff_OpenMPDEV(v2, v1, z);
return;
}
/* Cases: (1) a == 1.0, b == other or 0.0, (2) a == other or 0.0, b == 1.0 */
/* if a or b is 0.0, then user should have called N_VScale */
if ((test = (a == ONE)) || (b == ONE)) {
c = test ? b : a;
v1 = test ? y : x;
v2 = test ? x : y;
VLin1_OpenMPDEV(c, v1, v2, z);
return;
}
/* Cases: (1) a == -1.0, b != 1.0, (2) a != 1.0, b == -1.0 */
if ((test = (a == -ONE)) || (b == -ONE)) {
c = test ? b : a;
v1 = test ? y : x;
v2 = test ? x : y;
VLin2_OpenMPDEV(c, v1, v2, z);
return;
}
/* Case: a == b */
/* catches case both a and b are 0.0 - user should have called N_VConst */
if (a == b) {
VScaleSum_OpenMPDEV(a, x, y, z);
return;
}
/* Case: a == -b */
if (a == -b) {
VScaleDiff_OpenMPDEV(a, x, y, z);
return;
}
/* Do all cases not handled above:
(1) a == other, b == 0.0 - user should have called N_VScale
(2) a == 0.0, b == other - user should have called N_VScale
(3) a,b == other, a !=b, a != -b */
N = NV_LENGTH_OMPDEV(x);
xd_dev = NV_DATA_DEV_OMPDEV(x);
yd_dev = NV_DATA_DEV_OMPDEV(y);
zd_dev = NV_DATA_DEV_OMPDEV(z);
/* get default device identifier */
dev = omp_get_default_device();
#pragma omp target is_device_ptr(xd_dev, yd_dev, zd_dev) device(dev)
#pragma omp teams distribute parallel for schedule(static, 1)
for (i = 0; i < N; i++)
zd_dev[i] = (a*xd_dev[i])+(b*yd_dev[i]);
return;
}
/* ----------------------------------------------------------------------------
* Assigns constant value to all vector elements, z[i] = c
*/
void N_VConst_OpenMPDEV(realtype c, N_Vector z)
{
sunindextype i, N;
realtype *zd_dev;
int dev;
zd_dev = NULL;
N = NV_LENGTH_OMPDEV(z);
zd_dev = NV_DATA_DEV_OMPDEV(z);
/* get default device identifier */
dev = omp_get_default_device();
#pragma omp target is_device_ptr(zd_dev) device(dev)
#pragma omp teams distribute parallel for schedule(static, 1)
for (i = 0; i < N; i++) zd_dev[i] = c;
return;
}
/* ----------------------------------------------------------------------------
* Compute componentwise product z[i] = x[i]*y[i]
*/
void N_VProd_OpenMPDEV(N_Vector x, N_Vector y, N_Vector z)
{
sunindextype i, N;
realtype *xd_dev, *yd_dev, *zd_dev;
int dev;
xd_dev = yd_dev = zd_dev = NULL;
N = NV_LENGTH_OMPDEV(x);
xd_dev = NV_DATA_DEV_OMPDEV(x);
yd_dev = NV_DATA_DEV_OMPDEV(y);
zd_dev = NV_DATA_DEV_OMPDEV(z);
/* get default device identifier */
dev = omp_get_default_device();
#pragma omp target is_device_ptr(xd_dev, yd_dev, zd_dev) device(dev)
#pragma omp teams distribute parallel for schedule(static, 1)
for (i = 0; i < N; i++)
zd_dev[i] = xd_dev[i]*yd_dev[i];
return;
}
/* ----------------------------------------------------------------------------
* Compute componentwise division z[i] = x[i]/y[i]
*/
void N_VDiv_OpenMPDEV(N_Vector x, N_Vector y, N_Vector z)
{
sunindextype i, N;
realtype *xd_dev, *yd_dev, *zd_dev;
int dev;
xd_dev = yd_dev = zd_dev = NULL;
N = NV_LENGTH_OMPDEV(x);
xd_dev = NV_DATA_DEV_OMPDEV(x);
yd_dev = NV_DATA_DEV_OMPDEV(y);
zd_dev = NV_DATA_DEV_OMPDEV(z);
/* get default device identifier */
dev = omp_get_default_device();
#pragma omp target is_device_ptr(xd_dev, yd_dev, zd_dev) device(dev)
#pragma omp teams distribute parallel for schedule(static, 1)
for (i = 0; i < N; i++)
zd_dev[i] = xd_dev[i]/yd_dev[i];
return;
}
/* ----------------------------------------------------------------------------
* Compute scalar multiplication z[i] = c*x[i]
*/
void N_VScale_OpenMPDEV(realtype c, N_Vector x, N_Vector z)
{
sunindextype i, N;
realtype *xd_dev, *zd_dev;
int dev;
xd_dev = zd_dev = NULL;
if (z == x) { /* BLAS usage: scale x <- cx */
VScaleBy_OpenMPDEV(c, x);
return;
}
if (c == ONE) {
VCopy_OpenMPDEV(x, z);
} else if (c == -ONE) {
VNeg_OpenMPDEV(x, z);
} else {
N = NV_LENGTH_OMPDEV(x);
xd_dev = NV_DATA_DEV_OMPDEV(x);
zd_dev = NV_DATA_DEV_OMPDEV(z);
/* get default device identifier */
dev = omp_get_default_device();
#pragma omp target is_device_ptr(xd_dev, zd_dev) device(dev)
#pragma omp teams distribute parallel for schedule(static, 1)
for (i = 0; i < N; i++)
zd_dev[i] = c*xd_dev[i];
}
return;
}
/* ----------------------------------------------------------------------------
* Compute absolute value of vector components z[i] = SUNRabs(x[i])
*/
void N_VAbs_OpenMPDEV(N_Vector x, N_Vector z)
{
sunindextype i, N;
realtype *xd_dev, *zd_dev;
int dev;
xd_dev = zd_dev = NULL;
N = NV_LENGTH_OMPDEV(x);
xd_dev = NV_DATA_DEV_OMPDEV(x);
zd_dev = NV_DATA_DEV_OMPDEV(z);
/* get default device identifier */
dev = omp_get_default_device();
#pragma omp target is_device_ptr(xd_dev, zd_dev) device(dev)
#pragma omp teams distribute parallel for schedule(static, 1)
for (i = 0; i < N; i++)
zd_dev[i] = SUNRabs(xd_dev[i]);
return;
}
/* ----------------------------------------------------------------------------
* Compute componentwise inverse z[i] = 1 / x[i]
*/
void N_VInv_OpenMPDEV(N_Vector x, N_Vector z)
{
sunindextype i, N;
realtype *xd_dev, *zd_dev;
int dev;
xd_dev = zd_dev = NULL;
N = NV_LENGTH_OMPDEV(x);
xd_dev = NV_DATA_DEV_OMPDEV(x);
zd_dev = NV_DATA_DEV_OMPDEV(z);
/* get default device identifier */
dev = omp_get_default_device();
#pragma omp target is_device_ptr(xd_dev, zd_dev) device(dev)
#pragma omp teams distribute parallel for schedule(static, 1)
for (i = 0; i < N; i++)
zd_dev[i] = ONE/xd_dev[i];
return;
}
/* ----------------------------------------------------------------------------
* Compute componentwise addition of a scalar to a vector z[i] = x[i] + b
*/
void N_VAddConst_OpenMPDEV(N_Vector x, realtype b, N_Vector z)
{
sunindextype i, N;
realtype *xd_dev, *zd_dev;
int dev;
xd_dev = zd_dev = NULL;
N = NV_LENGTH_OMPDEV(x);
xd_dev = NV_DATA_DEV_OMPDEV(x);
zd_dev = NV_DATA_DEV_OMPDEV(z);
/* get default device identifier */
dev = omp_get_default_device();
#pragma omp target is_device_ptr(xd_dev, zd_dev) device(dev)
#pragma omp teams distribute parallel for schedule(static, 1)
for (i = 0; i < N; i++)
zd_dev[i] = xd_dev[i]+b;
return;
}
/* ----------------------------------------------------------------------------
* Computes the dot product of two vectors, a = sum(x[i]*y[i])
*/
realtype N_VDotProd_OpenMPDEV(N_Vector x, N_Vector y)
{
sunindextype i, N;
realtype sum, *xd_dev, *yd_dev;
int dev;
xd_dev = yd_dev = NULL;
sum = ZERO;
N = NV_LENGTH_OMPDEV(x);
xd_dev = NV_DATA_DEV_OMPDEV(x);
yd_dev = NV_DATA_DEV_OMPDEV(y);
/* get default device identifier */
dev = omp_get_default_device();
#pragma omp target map(tofrom:sum) is_device_ptr(xd_dev, yd_dev) device(dev)
#pragma omp teams distribute parallel for reduction(+:sum) schedule(static, 1)
for (i = 0; i < N; i++) {
sum += xd_dev[i]*yd_dev[i];
}
return(sum);
}
/* ----------------------------------------------------------------------------
* Computes max norm of a vector
*/
realtype N_VMaxNorm_OpenMPDEV(N_Vector x)
{
sunindextype i, N;
realtype max, *xd_dev;
int dev;
max = ZERO;
xd_dev = NULL;
N = NV_LENGTH_OMPDEV(x);
xd_dev = NV_DATA_DEV_OMPDEV(x);
/* get default device identifier */
dev = omp_get_default_device();
#pragma omp target map(tofrom:max) is_device_ptr(xd_dev) device(dev)
#pragma omp teams distribute parallel for reduction(max:max) schedule(static, 1)
for (i = 0; i < N; i++) {
max = SUNMAX(SUNRabs(xd_dev[i]), max);
}
return(max);
}
/* ----------------------------------------------------------------------------
* Computes weighted root mean square norm of a vector
*/
realtype N_VWrmsNorm_OpenMPDEV(N_Vector x, N_Vector w)
{
return(SUNRsqrt(N_VWSqrSumLocal_OpenMPDEV(x, w)/(NV_LENGTH_OMPDEV(x))));
}
/* ----------------------------------------------------------------------------
* Computes weighted root mean square norm of a masked vector
*/
realtype N_VWrmsNormMask_OpenMPDEV(N_Vector x, N_Vector w, N_Vector id)
{
return(SUNRsqrt(N_VWSqrSumMaskLocal_OpenMPDEV(x, w, id) / (NV_LENGTH_OMPDEV(x))));
}
/* ----------------------------------------------------------------------------
* Computes weighted square sum of a vector
*/
realtype N_VWSqrSumLocal_OpenMPDEV(N_Vector x, N_Vector w)
{
sunindextype i, N;
realtype sum, *xd_dev, *wd_dev;
int dev;
sum = ZERO;
xd_dev = wd_dev = NULL;
N = NV_LENGTH_OMPDEV(x);
xd_dev = NV_DATA_DEV_OMPDEV(x);
wd_dev = NV_DATA_DEV_OMPDEV(w);
/* get default device identifier */
dev = omp_get_default_device();
#pragma omp target map(tofrom:sum) is_device_ptr(xd_dev, wd_dev) device(dev)
#pragma omp teams distribute parallel for reduction(+:sum) schedule(static, 1)
for (i = 0; i < N; i++) {
sum += SUNSQR(xd_dev[i]*wd_dev[i]);
}
return(sum);
}
/* ----------------------------------------------------------------------------
* Computes weighted square sum of a masked vector
*/
realtype N_VWSqrSumMaskLocal_OpenMPDEV(N_Vector x, N_Vector w, N_Vector id)
{
sunindextype i, N;
realtype sum, *xd_dev, *wd_dev, *idd_dev;
int dev;
sum = ZERO;
xd_dev = wd_dev = idd_dev = NULL;
N = NV_LENGTH_OMPDEV(x);
xd_dev = NV_DATA_DEV_OMPDEV(x);
wd_dev = NV_DATA_DEV_OMPDEV(w);
idd_dev = NV_DATA_DEV_OMPDEV(id);
/* get default device identifier */
dev = omp_get_default_device();
#pragma omp target map(tofrom:sum) is_device_ptr(xd_dev, wd_dev, idd_dev) device(dev)
#pragma omp teams distribute parallel for reduction(+:sum) schedule(static, 1)
for (i = 0; i < N; i++) {
if (idd_dev[i] > ZERO) {
sum += SUNSQR(xd_dev[i]*wd_dev[i]);
}
}
return(sum);
}
/* ----------------------------------------------------------------------------
* Finds the minimum component of a vector
*/
realtype N_VMin_OpenMPDEV(N_Vector x)
{
sunindextype i, N;
realtype min, *xd_dev;
int dev;
xd_dev = NULL;
N = NV_LENGTH_OMPDEV(x);
xd_dev = NV_DATA_DEV_OMPDEV(x);
/* get default device identifier */
dev = omp_get_default_device();
#pragma omp target map(from:min) is_device_ptr(xd_dev) device(dev)
#pragma omp teams num_teams(1)
{
min = xd_dev[0];
#pragma omp distribute parallel for reduction(min:min) schedule(static, 1)
for (i = 1; i < N; i++) {
min = SUNMIN(xd_dev[i], min);
}
}
return(min);
}
/* ----------------------------------------------------------------------------
* Computes weighted L2 norm of a vector
*/
realtype N_VWL2Norm_OpenMPDEV(N_Vector x, N_Vector w)
{
sunindextype i, N;
realtype sum, *xd_dev, *wd_dev;
int dev;
sum = ZERO;
xd_dev = wd_dev = NULL;
N = NV_LENGTH_OMPDEV(x);
xd_dev = NV_DATA_DEV_OMPDEV(x);
wd_dev = NV_DATA_DEV_OMPDEV(w);
/* get default device identifier */
dev = omp_get_default_device();
#pragma omp target map(tofrom:sum) is_device_ptr(xd_dev, wd_dev) device(dev)
#pragma omp teams distribute parallel for reduction(+:sum) schedule(static, 1)
for (i = 0; i < N; i++) {
sum += SUNSQR(xd_dev[i]*wd_dev[i]);
}
return(SUNRsqrt(sum));
}
/* ----------------------------------------------------------------------------
* Computes L1 norm of a vector
*/
realtype N_VL1Norm_OpenMPDEV(N_Vector x)
{
sunindextype i, N;
realtype sum, *xd_dev;
int dev;
sum = ZERO;
xd_dev = NULL;
N = NV_LENGTH_OMPDEV(x);
xd_dev = NV_DATA_DEV_OMPDEV(x);
/* get default device identifier */
dev = omp_get_default_device();
#pragma omp target map(tofrom:sum) is_device_ptr(xd_dev) device(dev)
#pragma omp teams distribute parallel for reduction(+:sum) schedule(static, 1)
for (i = 0; i<N; i++)
sum += SUNRabs(xd_dev[i]);
return(sum);
}
/* ----------------------------------------------------------------------------
* Compare vector component values to a scalar
*/
void N_VCompare_OpenMPDEV(realtype c, N_Vector x, N_Vector z)
{
sunindextype i, N;
realtype *xd_dev, *zd_dev;
int dev;
xd_dev = zd_dev = NULL;
N = NV_LENGTH_OMPDEV(x);
xd_dev = NV_DATA_DEV_OMPDEV(x);
zd_dev = NV_DATA_DEV_OMPDEV(z);
/* get default device identifier */
dev = omp_get_default_device();
#pragma omp target is_device_ptr(xd_dev, zd_dev) device(dev)
#pragma omp teams distribute parallel for schedule(static, 1)
for (i = 0; i < N; i++)
zd_dev[i] = (SUNRabs(xd_dev[i]) >= c) ? ONE : ZERO;
return;
}
/* ----------------------------------------------------------------------------
* Compute componentwise inverse z[i] = ONE/x[i] and checks if x[i] == ZERO
*/
booleantype N_VInvTest_OpenMPDEV(N_Vector x, N_Vector z)
{
sunindextype i, N;
realtype *xd_dev, *zd_dev, val;
int dev;
xd_dev = zd_dev = NULL;
N = NV_LENGTH_OMPDEV(x);
xd_dev = NV_DATA_DEV_OMPDEV(x);
zd_dev = NV_DATA_DEV_OMPDEV(z);
/* get default device identifier */
dev = omp_get_default_device();
val = ZERO;
#pragma omp target map(tofrom:val) is_device_ptr(xd_dev, zd_dev) device(dev)
#pragma omp teams distribute parallel for reduction(max:val) schedule(static, 1)
for (i = 0; i < N; i++) {
if (xd_dev[i] == ZERO)
val = ONE;
else
zd_dev[i] = ONE/xd_dev[i];
}
if (val > ZERO)
return (SUNFALSE);
else
return (SUNTRUE);
}
/* ----------------------------------------------------------------------------
* Compute constraint mask of a vector
*/
booleantype N_VConstrMask_OpenMPDEV(N_Vector c, N_Vector x, N_Vector m)
{
sunindextype i, N;
realtype temp;
realtype *cd_dev, *xd_dev, *md_dev;
int dev;
cd_dev = xd_dev = md_dev = NULL;
N = NV_LENGTH_OMPDEV(x);
xd_dev = NV_DATA_DEV_OMPDEV(x);
cd_dev = NV_DATA_DEV_OMPDEV(c);
md_dev = NV_DATA_DEV_OMPDEV(m);
/* get default device identifier */
dev = omp_get_default_device();
temp = ONE;
#pragma omp target map(tofrom:temp) is_device_ptr(xd_dev, cd_dev, md_dev) device(dev)
#pragma omp teams distribute parallel for reduction(min:temp) schedule(static, 1)
for (i = 0; i < N; i++) {
md_dev[i] = ZERO;
if (cd_dev[i] == ZERO) continue;
if (cd_dev[i] > ONEPT5 || cd_dev[i] < -ONEPT5) {
if ( xd_dev[i]*cd_dev[i] <= ZERO) { temp = ZERO; md_dev[i] = ONE; }
continue;
}
if ( cd_dev[i] > HALF || cd_dev[i] < -HALF) {
if (xd_dev[i]*cd_dev[i] < ZERO ) { temp = ZERO; md_dev[i] = ONE; }
}
}
if (temp == ONE) return (SUNTRUE);
else return(SUNFALSE);
}
/* ----------------------------------------------------------------------------
* Compute minimum componentwise quotient
*/
realtype N_VMinQuotient_OpenMPDEV(N_Vector num, N_Vector denom)
{
sunindextype i, N;
realtype *nd_dev, *dd_dev, min;
int dev;
nd_dev = dd_dev = NULL;
N = NV_LENGTH_OMPDEV(num);
nd_dev = NV_DATA_DEV_OMPDEV(num);
dd_dev = NV_DATA_DEV_OMPDEV(denom);
/* get default device identifier */
dev = omp_get_default_device();
min = BIG_REAL;
#pragma omp target map(tofrom:min) is_device_ptr(nd_dev, dd_dev) device(dev)
#pragma omp teams distribute parallel for reduction(min:min) schedule(static, 1)
for (i = 0; i < N; i++)
if (dd_dev[i] != ZERO) min = SUNMIN(nd_dev[i]/dd_dev[i], min);
return(min);
}
/*
* -----------------------------------------------------------------
* fused vector operations
* -----------------------------------------------------------------
*/
int N_VLinearCombination_OpenMPDEV(int nvec, realtype* c, N_Vector* X, N_Vector z)
{
int i, dev;
realtype to_add; /* temporary variable to hold sum being added in atomic operation */
sunindextype j, N;
realtype* zd_dev=NULL;
realtype* xd_dev=NULL;
realtype** xd_dev_ptrs=NULL;
/* invalid number of vectors */
if (nvec < 1) return(-1);
/* should have called N_VScale */
if (nvec == 1) {
N_VScale_OpenMPDEV(c[0], X[0], z);
return(0);
}
/* should have called N_VLinearSum */
if (nvec == 2) {
N_VLinearSum_OpenMPDEV(c[0], X[0], c[1], X[1], z);
return(0);
}
/* get vector length and data array */
N = NV_LENGTH_OMPDEV(z);
zd_dev = NV_DATA_DEV_OMPDEV(z);
/* get default device identifier */
dev = omp_get_default_device();
/* Allocate and store X dev pointers to copy to device */
xd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
for (i=0; i<nvec; i++)
xd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(X[i]);
/*
* X[0] += c[i]*X[i], i = 1,...,nvec-1
*/
if ((X[0] == z) && (c[0] == ONE)) {
#pragma omp target map(to:N,nvec,c[:nvec],xd_dev_ptrs[:nvec]) \
is_device_ptr(xd_dev,zd_dev) device(dev)
#pragma omp teams distribute
{
for (i=1; i<nvec; i++) {
xd_dev = xd_dev_ptrs[i];
#pragma omp parallel for schedule(static, 1)
for (j=0; j<N; j++) {
to_add = c[i] * xd_dev[j];
#pragma omp atomic
zd_dev[j] += to_add;
}
}
}
free(xd_dev_ptrs);
return(0);
}
/*
* X[0] = c[0] * X[0] + sum{ c[i] * X[i] }, i = 1,...,nvec-1
*/
if (X[0] == z) {
#pragma omp target map(to:N,nvec,c[:nvec],xd_dev_ptrs[:nvec]) \
is_device_ptr(xd_dev,zd_dev)
{
#pragma omp teams distribute parallel for schedule(static,1)
for (j=0; j<N; j++)
zd_dev[j] *= c[0];
}
#pragma omp target map(to:N,nvec,c[:nvec],xd_dev_ptrs[:nvec]) \
is_device_ptr(xd_dev,zd_dev)
#pragma omp teams distribute
{
for (i=1; i<nvec; i++) {
xd_dev = xd_dev_ptrs[i];
#pragma omp parallel for schedule(static, 1)
for (j=0; j<N; j++) {
to_add = c[i] * xd_dev[j];
#pragma omp atomic
zd_dev[j] += to_add;
}
}
}
free(xd_dev_ptrs);
return(0);
}
/*
* z = sum{ c[i] * X[i] }, i = 0,...,nvec-1
*/
xd_dev = NV_DATA_DEV_OMPDEV(X[0]);
#pragma omp target map(to:N,c[:nvec]) \
is_device_ptr(xd_dev, zd_dev) device(dev)
{
#pragma omp teams distribute parallel for schedule(static, 1)
for (j=0; j<N; j++) {
zd_dev[j] = c[0] * xd_dev[j];
}
}
#pragma omp target map(to:N,nvec,c[:nvec],xd_dev_ptrs[:nvec]) \
is_device_ptr(xd_dev, zd_dev) device(dev)
#pragma omp teams distribute
{
for (i=1; i<nvec; i++) {
xd_dev = xd_dev_ptrs[i];
#pragma omp parallel for schedule(static, 1)
for (j=0; j<N; j++) {
to_add = c[i] * xd_dev[j];
#pragma omp atomic
zd_dev[j] += to_add;
}
}
}
free(xd_dev_ptrs);
return(0);
}
int N_VScaleAddMulti_OpenMPDEV(int nvec, realtype* a, N_Vector x, N_Vector* Y, N_Vector* Z)
{
int i, dev;
sunindextype j, N;
realtype* xd_dev=NULL;
realtype* yd_dev=NULL;
realtype* zd_dev=NULL;
realtype** yd_dev_ptrs=NULL;
realtype** zd_dev_ptrs=NULL;
/* invalid number of vectors */
if (nvec < 1) return(-1);
/* should have called N_VLinearSum */
if (nvec == 1) {
N_VLinearSum_OpenMPDEV(a[0], x, ONE, Y[0], Z[0]);
return(0);
}
/* get vector length and data array */
N = NV_LENGTH_OMPDEV(x);
xd_dev = NV_DATA_DEV_OMPDEV(x);
/* get default device identifier */
dev = omp_get_default_device();
/* Allocate and store dev pointers to copy to device */
yd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
for (i=0; i<nvec; i++)
yd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Y[i]);
/*
* Y[i][j] += a[i] * x[j]
*/
if (Y == Z) {
#pragma omp target map(to:N,nvec,a[:nvec],yd_dev_ptrs[:nvec]) \
is_device_ptr(xd_dev, yd_dev) device(dev)
#pragma omp teams distribute
{
for (i=0; i<nvec; i++) {
yd_dev = yd_dev_ptrs[i];
#pragma omp parallel for schedule(static, 1)
for (j=0; j<N; j++)
yd_dev[j] += a[i] * xd_dev[j];
}
}
free(yd_dev_ptrs);
return(0);
}
/* Allocate and store dev pointers to copy to device */
zd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
for (i=0; i<nvec; i++)
zd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Z[i]);
/*
* Z[i][j] = Y[i][j] + a[i] * x[j]
*/
#pragma omp target map(to:N,nvec,a[:nvec],yd_dev_ptrs[:nvec],zd_dev_ptrs[:nvec]) \
is_device_ptr(xd_dev, yd_dev, zd_dev) device(dev)
#pragma omp teams distribute
{
for (i=0; i<nvec; i++) {
yd_dev = yd_dev_ptrs[i];
zd_dev = zd_dev_ptrs[i];
#pragma omp parallel for schedule(static, 1)
for (j=0; j<N; j++)
zd_dev[j] = a[i] * xd_dev[j] + yd_dev[j];
}
}
free(yd_dev_ptrs);
free(zd_dev_ptrs);
return(0);
}
int N_VDotProdMulti_OpenMPDEV(int nvec, N_Vector x, N_Vector* Y, realtype* dotprods)
{
int i, dev;
sunindextype j, N;
realtype sum;
realtype* xd_dev=NULL;
realtype* yd_dev=NULL;
realtype** yd_dev_ptrs=NULL;
/* invalid number of vectors */
if (nvec < 1) return(-1);
/* should have called N_VDotProd */
if (nvec == 1) {
dotprods[0] = N_VDotProd_OpenMPDEV(x, Y[0]);
return(0);
}
/* get vector length and data array */
N = NV_LENGTH_OMPDEV(x);
xd_dev = NV_DATA_DEV_OMPDEV(x);
/* get default device identifier */
dev = omp_get_default_device();
/* initialize dot products */
for (i=0; i<nvec; i++) {
dotprods[i] = ZERO;
}
/* Allocate and store dev pointers to copy to device */
yd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
for (i=0; i<nvec; i++)
yd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Y[i]);
/* compute multiple dot products */
#pragma omp target map(to:N,nvec,yd_dev_ptrs[:nvec]) map(tofrom:dotprods[:nvec]) \
is_device_ptr(xd_dev,yd_dev) device(dev)
#pragma omp teams distribute
for (i=0; i<nvec; i++) {
yd_dev = yd_dev_ptrs[i];
sum = ZERO;
#pragma omp parallel for reduction(+:sum) schedule(static, 1)
for (j=0; j<N; j++)
sum += xd_dev[j] * yd_dev[j];
dotprods[i] += sum;
}
free(yd_dev_ptrs);
return(0);
}
/*
* -----------------------------------------------------------------
* vector array operations
* -----------------------------------------------------------------
*/
int N_VLinearSumVectorArray_OpenMPDEV(int nvec,
realtype a, N_Vector* X,
realtype b, N_Vector* Y,
N_Vector* Z)
{
int i, dev;
sunindextype j, N;
N_Vector* V1;
N_Vector* V2;
booleantype test;
realtype c;
realtype* xd_dev=NULL;
realtype* yd_dev=NULL;
realtype* zd_dev=NULL;
realtype** xd_dev_ptrs=NULL;
realtype** yd_dev_ptrs=NULL;
realtype** zd_dev_ptrs=NULL;
/* invalid number of vectors */
if (nvec < 1) return(-1);
/* should have called N_VLinearSum */
if (nvec == 1) {
N_VLinearSum_OpenMPDEV(a, X[0], b, Y[0], Z[0]);
return(0);
}
/* BLAS usage: axpy y <- ax+y */
if ((b == ONE) && (Z == Y))
return(VaxpyVectorArray_OpenMPDEV(nvec, a, X, Y));
/* BLAS usage: axpy x <- by+x */
if ((a == ONE) && (Z == X))
return(VaxpyVectorArray_OpenMPDEV(nvec, b, Y, X));
/* Case: a == b == 1.0 */
if ((a == ONE) && (b == ONE))
return(VSumVectorArray_OpenMPDEV(nvec, X, Y, Z));
/* Cases: */
/* (1) a == 1.0, b = -1.0, */
/* (2) a == -1.0, b == 1.0 */
if ((test = ((a == ONE) && (b == -ONE))) || ((a == -ONE) && (b == ONE))) {
V1 = test ? Y : X;
V2 = test ? X : Y;
return(VDiffVectorArray_OpenMPDEV(nvec, V2, V1, Z));
}
/* Cases: */
/* (1) a == 1.0, b == other or 0.0, */
/* (2) a == other or 0.0, b == 1.0 */
/* if a or b is 0.0, then user should have called N_VScale */
if ((test = (a == ONE)) || (b == ONE)) {
c = test ? b : a;
V1 = test ? Y : X;
V2 = test ? X : Y;
return(VLin1VectorArray_OpenMPDEV(nvec, c, V1, V2, Z));
}
/* Cases: */
/* (1) a == -1.0, b != 1.0, */
/* (2) a != 1.0, b == -1.0 */
if ((test = (a == -ONE)) || (b == -ONE)) {
c = test ? b : a;
V1 = test ? Y : X;
V2 = test ? X : Y;
return(VLin2VectorArray_OpenMPDEV(nvec, c, V1, V2, Z));
}
/* Case: a == b */
/* catches case both a and b are 0.0 - user should have called N_VConst */
if (a == b)
return(VScaleSumVectorArray_OpenMPDEV(nvec, a, X, Y, Z));
/* Case: a == -b */
if (a == -b)
return(VScaleDiffVectorArray_OpenMPDEV(nvec, a, X, Y, Z));
/* Do all cases not handled above: */
/* (1) a == other, b == 0.0 - user should have called N_VScale */
/* (2) a == 0.0, b == other - user should have called N_VScale */
/* (3) a,b == other, a !=b, a != -b */
/* get vector length */
N = NV_LENGTH_OMPDEV(Z[0]);
/* get default device identifier */
dev = omp_get_default_device();
/* Allocate and store dev pointers to copy to device */
xd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
yd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
zd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
for (i=0; i<nvec; i++)
xd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(X[i]);
for (i=0; i<nvec; i++)
yd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Y[i]);
for (i=0; i<nvec; i++)
zd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Z[i]);
/* compute linear sum for each vector pair in vector arrays */
#pragma omp target map(to:N,nvec,a,b,xd_dev_ptrs[:nvec], yd_dev_ptrs[:nvec],zd_dev_ptrs[:nvec]) \
is_device_ptr(xd_dev, yd_dev, zd_dev) device(dev)
#pragma omp teams distribute
{
for (i=0; i<nvec; i++) {
xd_dev = xd_dev_ptrs[i];
yd_dev = yd_dev_ptrs[i];
zd_dev = zd_dev_ptrs[i];
#pragma omp parallel for schedule(static, 1)
for (j=0; j<N; j++)
zd_dev[j] = a * xd_dev[j] + b * yd_dev[j];
}
}
free(xd_dev_ptrs);
free(yd_dev_ptrs);
free(zd_dev_ptrs);
return(0);
}
int N_VScaleVectorArray_OpenMPDEV(int nvec, realtype* c, N_Vector* X, N_Vector* Z)
{
int i, dev;
sunindextype j, N;
realtype* xd_dev=NULL;
realtype* zd_dev=NULL;
realtype** xd_dev_ptrs=NULL;
realtype** zd_dev_ptrs=NULL;
/* invalid number of vectors */
if (nvec < 1) return(-1);
/* should have called N_VScale */
if (nvec == 1) {
N_VScale_OpenMPDEV(c[0], X[0], Z[0]);
return(0);
}
/* get vector length */
N = NV_LENGTH_OMPDEV(Z[0]);
/* get default device identifier */
dev = omp_get_default_device();
/* Allocate and store dev pointers to copy to device */
xd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
for (i=0; i<nvec; i++) {
xd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(X[i]);
}
/*
* X[i] *= c[i]
*/
if (X == Z) {
#pragma omp target map(to:N,nvec,c[:nvec],xd_dev_ptrs[:nvec]) \
is_device_ptr(xd_dev) device(dev)
#pragma omp teams distribute private(xd_dev)
for (i=0; i<nvec; i++) {
xd_dev = xd_dev_ptrs[i];
#pragma omp parallel for schedule(static, 1)
for (j=0; j<N; j++)
xd_dev[j] *= c[i];
}
free(xd_dev_ptrs);
return(0);
}
/* Allocate and store dev pointers to copy to device */
zd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
for (i=0; i<nvec; i++)
zd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Z[i]);
/*
* Z[i] = c[i] * X[i]
*/
#pragma omp target map(to:N,nvec,c[:nvec],xd_dev_ptrs[:nvec],zd_dev_ptrs[:nvec]) \
is_device_ptr(xd_dev, zd_dev) device(dev)
#pragma omp teams distribute private(xd_dev, zd_dev)
for (i=0; i<nvec; i++) {
xd_dev = xd_dev_ptrs[i];
zd_dev = zd_dev_ptrs[i];
#pragma omp parallel for schedule(static, 1)
for (j=0; j<N; j++)
zd_dev[j] = c[i] * xd_dev[j];
}
free(xd_dev_ptrs);
free(zd_dev_ptrs);
return(0);
}
int N_VConstVectorArray_OpenMPDEV(int nvec, realtype c, N_Vector* Z)
{
int i, dev;
sunindextype j, N;
realtype* zd_dev=NULL;
realtype** zd_dev_ptrs=NULL;
/* invalid number of vectors */
if (nvec < 1) return(-1);
/* should have called N_VConst */
if (nvec == 1) {
N_VConst_OpenMPDEV(c, Z[0]);
return(0);
}
/* get vector length */
N = NV_LENGTH_OMPDEV(Z[0]);
/* get device */
dev = omp_get_default_device();
/* Allocate and store dev pointers to copy to device */
zd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
for (i=0; i<nvec; i++)
zd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Z[i]);
/* set each vector in the vector array to a constant */
#pragma omp target map(to:N,nvec,zd_dev_ptrs[:nvec]) \
is_device_ptr(zd_dev) device(dev)
#pragma omp teams distribute private(zd_dev)
for (i=0; i<nvec; i++) {
zd_dev = zd_dev_ptrs[i];
#pragma omp parallel for schedule(static, 1)
for (j=0; j<N; j++)
zd_dev[j] = c;
}
free(zd_dev_ptrs);
return(0);
}
int N_VWrmsNormVectorArray_OpenMPDEV(int nvec, N_Vector* X, N_Vector* W, realtype* nrm)
{
int i, dev;
sunindextype j, N;
realtype sum;
realtype* wd_dev=NULL;
realtype* xd_dev=NULL;
realtype** wd_dev_ptrs=NULL;
realtype** xd_dev_ptrs=NULL;
/* invalid number of vectors */
if (nvec < 1) return(-1);
/* should have called N_VWrmsNorm */
if (nvec == 1) {
nrm[0] = N_VWrmsNorm_OpenMPDEV(X[0], W[0]);
return(0);
}
/* get vector length */
N = NV_LENGTH_OMPDEV(X[0]);
/* get default device identifier */
dev = omp_get_default_device();
/* initialize norms */
for (i=0; i<nvec; i++)
nrm[i] = ZERO;
/* Allocate and store dev pointers to copy to device */
wd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
xd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
for (i=0; i<nvec; i++)
wd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(W[i]);
for (i=0; i<nvec; i++)
xd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(X[i]);
/* compute the WRMS norm for each vector in the vector array */
#pragma omp target map(to:N,nvec,xd_dev_ptrs[:nvec],wd_dev_ptrs[:nvec]) map(tofrom:nrm[:nvec]) \
is_device_ptr(xd_dev, wd_dev) device(dev)
#pragma omp teams distribute private(xd_dev, wd_dev, sum)
for (i=0; i<nvec; i++) {
xd_dev = xd_dev_ptrs[i];
wd_dev = wd_dev_ptrs[i];
sum = ZERO;
#pragma omp parallel for reduction(+:sum) schedule(static, 1)
for (j=0; j<N; j++)
sum += SUNSQR(xd_dev[j] * wd_dev[j]);
nrm[i] = SUNRsqrt(sum/N);
}
free(wd_dev_ptrs);
free(xd_dev_ptrs);
return(0);
}
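/* Editor's note: each entry computed above is the weighted root-mean-square
norm nrm[i] = sqrt( (1/N) * sum_{j=0}^{N-1} (x_i[j]*w_i[j])^2 ). */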
int N_VWrmsNormMaskVectorArray_OpenMPDEV(int nvec, N_Vector* X, N_Vector* W,
N_Vector id, realtype* nrm)
{
int i, dev;
sunindextype j, N;
realtype sum;
realtype* wd_dev=NULL;
realtype* xd_dev=NULL;
realtype* idd_dev=NULL;
realtype** wd_dev_ptrs=NULL;
realtype** xd_dev_ptrs=NULL;
/* invalid number of vectors */
if (nvec < 1) return(-1);
/* should have called N_VWrmsNorm */
if (nvec == 1) {
nrm[0] = N_VWrmsNormMask_OpenMPDEV(X[0], W[0], id);
return(0);
}
/* get vector length and mask data array */
N = NV_LENGTH_OMPDEV(X[0]);
idd_dev = NV_DATA_DEV_OMPDEV(id);
/* get default device identifier */
dev = omp_get_default_device();
/* initialize norms */
for (i=0; i<nvec; i++)
nrm[i] = ZERO;
/* Allocate and store dev pointers to copy to device */
xd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
wd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
for (i=0; i<nvec; i++)
xd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(X[i]);
for (i=0; i<nvec; i++)
wd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(W[i]);
/* compute the WRMS norm for each vector in the vector array */
#pragma omp target map(to:N,nvec,xd_dev_ptrs[:nvec],wd_dev_ptrs[:nvec]) map(tofrom:nrm[:nvec]) \
is_device_ptr(idd_dev, xd_dev, wd_dev) device(dev)
#pragma omp teams distribute private(xd_dev, wd_dev, sum)
for (i=0; i<nvec; i++) {
xd_dev = xd_dev_ptrs[i];
wd_dev = wd_dev_ptrs[i];
sum = ZERO;
#pragma omp parallel for reduction(+:sum) schedule(static, 1)
for (j=0; j<N; j++) {
if (idd_dev[j] > ZERO)
sum += SUNSQR(xd_dev[j] * wd_dev[j]);
}
nrm[i] = SUNRsqrt(sum/N);
}
free(xd_dev_ptrs);
free(wd_dev_ptrs);
return(0);
}
int N_VScaleAddMultiVectorArray_OpenMPDEV(int nvec, int nsum, realtype* a,
N_Vector* X, N_Vector** Y, N_Vector** Z)
{
int i, j, dev;
sunindextype k, N;
realtype* xd_dev=NULL;
realtype* yd_dev=NULL;
realtype* zd_dev=NULL;
realtype** xd_dev_ptrs=NULL;
realtype** yd_dev_ptrs=NULL;
realtype** zd_dev_ptrs=NULL;
int retval;
N_Vector* YY;
N_Vector* ZZ;
/* invalid number of vectors */
if (nvec < 1) return(-1);
if (nsum < 1) return(-1);
/* ---------------------------
* Special cases for nvec == 1
* --------------------------- */
if (nvec == 1) {
/* should have called N_VLinearSum */
if (nsum == 1) {
N_VLinearSum_OpenMPDEV(a[0], X[0], ONE, Y[0][0], Z[0][0]);
return(0);
}
/* should have called N_VScaleAddMulti */
YY = (N_Vector *) malloc(nsum * sizeof(N_Vector));
ZZ = (N_Vector *) malloc(nsum * sizeof(N_Vector));
for (j=0; j<nsum; j++) {
YY[j] = Y[j][0];
ZZ[j] = Z[j][0];
}
retval = N_VScaleAddMulti_OpenMPDEV(nsum, a, X[0], YY, ZZ);
free(YY);
free(ZZ);
return(retval);
}
/* --------------------------
* Special cases for nvec > 1
* -------------------------- */
/* should have called N_VLinearSumVectorArray */
if (nsum == 1) {
retval = N_VLinearSumVectorArray_OpenMPDEV(nvec, a[0], X, ONE, Y[0], Z[0]);
return(retval);
}
/* ----------------------------
* Compute multiple linear sums
* ---------------------------- */
/* get vector length */
N = NV_LENGTH_OMPDEV(X[0]);
/* get default device identifier */
dev = omp_get_default_device();
/* Allocate and store dev pointers to copy to device */
xd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
yd_dev_ptrs = (realtype**) malloc(nvec * nsum * sizeof(realtype*));
for (i=0; i<nvec; i++)
xd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(X[i]);
for (i=0; i<nvec; i++) {
for (j=0; j<nsum; j++)
yd_dev_ptrs[i * nsum + j] = NV_DATA_DEV_OMPDEV(Y[j][i]);
}
/*
* Y[j][i] += a[j] * X[i], j = 0,...,nsum-1
*/
if (Y == Z) {
#pragma omp target map(to:N,nvec,nsum,a[:nsum],xd_dev_ptrs[:nvec],yd_dev_ptrs[:nvec*nsum]) \
is_device_ptr(xd_dev, yd_dev) device(dev)
#pragma omp teams distribute private(xd_dev, yd_dev, j)
for (i=0; i<nvec; i++) {
xd_dev = xd_dev_ptrs[i];
for (j=0; j<nsum; j++) {
yd_dev = yd_dev_ptrs[i*nsum+j];
#pragma omp parallel for schedule(static, 1)
for (k=0; k<N; k++)
yd_dev[k] += a[j] * xd_dev[k];
}
}
free(xd_dev_ptrs);
free(yd_dev_ptrs);
return(0);
}
/* Allocate and store dev pointers to copy to device */
zd_dev_ptrs = (realtype**) malloc(nvec * nsum * sizeof(realtype*));
for (i=0; i<nvec; i++) {
for (j=0; j<nsum; j++)
zd_dev_ptrs[i * nsum + j] = NV_DATA_DEV_OMPDEV(Z[j][i]);
}
/*
* Z[j][i] = a[j] * X[i] + Y[j][i], j = 0,...,nsum-1
*/
#pragma omp target map(to:N,nvec,nsum,a[:nsum],xd_dev_ptrs[:nvec],yd_dev_ptrs[:nvec*nsum],zd_dev_ptrs[:nvec*nsum]) \
is_device_ptr(xd_dev, yd_dev, zd_dev) device(dev)
#pragma omp teams distribute private(xd_dev, yd_dev, zd_dev, j)
for (i=0; i<nvec; i++) {
xd_dev = xd_dev_ptrs[i];
for (j=0; j<nsum; j++) {
yd_dev = yd_dev_ptrs[i*nsum+j];
zd_dev = zd_dev_ptrs[i*nsum+j];
#pragma omp parallel for schedule(static, 1)
for (k=0; k<N; k++)
zd_dev[k] = a[j] * xd_dev[k] + yd_dev[k];
}
}
free(xd_dev_ptrs);
free(yd_dev_ptrs);
free(zd_dev_ptrs);
return(0);
}
int N_VLinearCombinationVectorArray_OpenMPDEV(int nvec, int nsum,
realtype* c,
N_Vector** X,
N_Vector* Z)
{
int i; /* vector arrays index in summation [0,nsum) */
int j; /* vector index in vector array [0,nvec) */
sunindextype k; /* element index in vector [0,N) */
sunindextype N;
realtype* zd_dev=NULL;
realtype* xd_dev=NULL;
realtype** zd_dev_ptrs=NULL;
realtype** xd_dev_ptrs=NULL;
int dev;
realtype* ctmp;
N_Vector* Y;
/* invalid number of vectors */
if (nvec < 1) return(-1);
if (nsum < 1) return(-1);
/* ---------------------------
* Special cases for nvec == 1
* --------------------------- */
if (nvec == 1) {
/* should have called N_VScale */
if (nsum == 1) {
N_VScale_OpenMPDEV(c[0], X[0][0], Z[0]);
return(0);
}
/* should have called N_VLinearSum */
if (nsum == 2) {
N_VLinearSum_OpenMPDEV(c[0], X[0][0], c[1], X[1][0], Z[0]);
return(0);
}
/* should have called N_VLinearCombination */
Y = (N_Vector *) malloc(nsum * sizeof(N_Vector));
for (i=0; i<nsum; i++) {
Y[i] = X[i][0];
}
N_VLinearCombination_OpenMPDEV(nsum, c, Y, Z[0]);
free(Y);
return(0);
}
/* --------------------------
* Special cases for nvec > 1
* -------------------------- */
/* should have called N_VScaleVectorArray */
if (nsum == 1) {
ctmp = (realtype*) malloc(nvec * sizeof(realtype));
for (j=0; j<nvec; j++) {
ctmp[j] = c[0];
}
N_VScaleVectorArray_OpenMPDEV(nvec, ctmp, X[0], Z);
free(ctmp);
return(0);
}
/* should have called N_VLinearSumVectorArray */
if (nsum == 2) {
N_VLinearSumVectorArray_OpenMPDEV(nvec, c[0], X[0], c[1], X[1], Z);
return(0);
}
/* --------------------------
* Compute linear combination
* -------------------------- */
/* get vector length */
N = NV_LENGTH_OMPDEV(Z[0]);
/* get default device identifier */
dev = omp_get_default_device();
/* Allocate and store dev pointers to copy to device */
zd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
xd_dev_ptrs = (realtype**) malloc(nvec * nsum * sizeof(realtype*));
for (j=0; j<nvec; j++)
zd_dev_ptrs[j] = NV_DATA_DEV_OMPDEV(Z[j]);
for (j=0; j<nvec; j++) {
for (i=0; i<nsum; i++)
xd_dev_ptrs[j * nsum + i] = NV_DATA_DEV_OMPDEV(X[i][j]);
}
/*
* X[0][j] += c[i]*X[i][j], i = 1,...,nsum-1
*/
if ((X[0] == Z) && (c[0] == ONE)) {
#pragma omp target map(to:N,nvec,c[:nsum],xd_dev_ptrs[:nvec*nsum],zd_dev_ptrs[:nvec]) \
is_device_ptr(xd_dev, zd_dev) device(dev)
#pragma omp teams distribute private(xd_dev, zd_dev, i)
for (j=0; j<nvec; j++) {
zd_dev = zd_dev_ptrs[j];
for (i=1; i<nsum; i++) {
xd_dev = xd_dev_ptrs[j*nsum+i];
#pragma omp parallel for schedule(static, 1)
for (k=0; k<N; k++)
zd_dev[k] += c[i] * xd_dev[k];
}
}
free(xd_dev_ptrs);
free(zd_dev_ptrs);
return(0);
}
/*
* X[0][j] = c[0] * X[0][j] + sum{ c[i] * X[i][j] }, i = 1,...,nsum-1
*/
if (X[0] == Z) {
#pragma omp target map(to:N,nvec,c[:nsum],xd_dev_ptrs[:nvec*nsum],zd_dev_ptrs[:nvec]) \
is_device_ptr(xd_dev, zd_dev) device(dev)
#pragma omp teams distribute private(xd_dev, zd_dev, i)
for (j=0; j<nvec; j++) {
zd_dev = zd_dev_ptrs[j];
#pragma omp parallel for schedule(static, 1)
for (k=0; k<N; k++)
zd_dev[k] *= c[0];
for (i=1; i<nsum; i++) {
xd_dev = xd_dev_ptrs[j*nsum+i];
#pragma omp parallel for schedule(static, 1)
for (k=0; k<N; k++)
zd_dev[k] += c[i] * xd_dev[k];
}
}
free(xd_dev_ptrs);
free(zd_dev_ptrs);
return(0);
}
/*
* Z[j] = sum{ c[i] * X[i][j] }, i = 0,...,nsum-1
*/
#pragma omp target map(to:N,nvec,c[:nsum],xd_dev_ptrs[:nvec*nsum],zd_dev_ptrs[:nvec]) \
is_device_ptr(xd_dev, zd_dev) device(dev)
#pragma omp teams distribute private(xd_dev, zd_dev, i)
for (j=0; j<nvec; j++) {
/* scale first vector in the sum into the output vector */
xd_dev = xd_dev_ptrs[j*nsum];
zd_dev = zd_dev_ptrs[j];
#pragma omp parallel for schedule(static, 1)
for (k=0; k<N; k++)
zd_dev[k] = c[0] * xd_dev[k];
/* scale and sum remaining vectors into the output vector */
for (i=1; i<nsum; i++) {
xd_dev = xd_dev_ptrs[j*nsum+i];
#pragma omp parallel for schedule(static, 1)
for (k=0; k<N; k++)
zd_dev[k] += c[i] * xd_dev[k];
}
}
free(xd_dev_ptrs);
free(zd_dev_ptrs);
return(0);
}
/*
* -----------------------------------------------------------------
* private functions
* -----------------------------------------------------------------
*/
/* ----------------------------------------------------------------------------
* Copy vector components into a second vector
*/
static void VCopy_OpenMPDEV(N_Vector x, N_Vector z)
{
sunindextype i, N;
realtype *xd_dev, *zd_dev;
int dev;
xd_dev = zd_dev = NULL;
N = NV_LENGTH_OMPDEV(x);
xd_dev = NV_DATA_DEV_OMPDEV(x);
zd_dev = NV_DATA_DEV_OMPDEV(z);
/* get default device identifier */
dev = omp_get_default_device();
#pragma omp target is_device_ptr(xd_dev, zd_dev) device(dev)
#pragma omp teams distribute parallel for schedule(static, 1)
for (i = 0; i < N; i++)
zd_dev[i] = xd_dev[i];
return;
}
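/* Editor's note on a design choice: schedule(static, 1) deals out consecutive
iterations round-robin across threads, so adjacent threads touch adjacent
elements of xd_dev/zd_dev; on GPU-like devices this yields coalesced memory
accesses, which is presumably why it appears on every loop in this file. */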
/* ----------------------------------------------------------------------------
* Compute vector sum
*/
static void VSum_OpenMPDEV(N_Vector x, N_Vector y, N_Vector z)
{
sunindextype i, N;
realtype *xd_dev, *yd_dev, *zd_dev;
int dev;
xd_dev = yd_dev = zd_dev = NULL;
N = NV_LENGTH_OMPDEV(x);
xd_dev = NV_DATA_DEV_OMPDEV(x);
yd_dev = NV_DATA_DEV_OMPDEV(y);
zd_dev = NV_DATA_DEV_OMPDEV(z);
/* get default device identifier */
dev = omp_get_default_device();
#pragma omp target is_device_ptr(xd_dev, yd_dev, zd_dev) device(dev)
#pragma omp teams distribute parallel for schedule(static, 1)
for (i = 0; i < N; i++)
zd_dev[i] = xd_dev[i]+yd_dev[i];
return;
}
/* ----------------------------------------------------------------------------
* Compute vector difference
*/
static void VDiff_OpenMPDEV(N_Vector x, N_Vector y, N_Vector z)
{
sunindextype i, N;
realtype *xd_dev, *yd_dev, *zd_dev;
int dev;
xd_dev = yd_dev = zd_dev = NULL;
N = NV_LENGTH_OMPDEV(x);
xd_dev = NV_DATA_DEV_OMPDEV(x);
yd_dev = NV_DATA_DEV_OMPDEV(y);
zd_dev = NV_DATA_DEV_OMPDEV(z);
/* get default device identifier */
dev = omp_get_default_device();
#pragma omp target is_device_ptr(xd_dev, yd_dev, zd_dev) device(dev)
#pragma omp teams distribute parallel for schedule(static, 1)
for (i = 0; i < N; i++)
zd_dev[i] = xd_dev[i]-yd_dev[i];
return;
}
/* ----------------------------------------------------------------------------
* Compute the negative of a vector
*/
static void VNeg_OpenMPDEV(N_Vector x, N_Vector z)
{
sunindextype i, N;
realtype *xd_dev, *zd_dev;
int dev;
xd_dev = zd_dev = NULL;
N = NV_LENGTH_OMPDEV(x);
xd_dev = NV_DATA_DEV_OMPDEV(x);
zd_dev = NV_DATA_DEV_OMPDEV(z);
/* get default device identifier */
dev = omp_get_default_device();
#pragma omp target is_device_ptr(xd_dev, zd_dev) device(dev)
#pragma omp teams distribute parallel for schedule(static, 1)
for (i = 0; i < N; i++)
zd_dev[i] = -xd_dev[i];
return;
}
/* ----------------------------------------------------------------------------
* Compute scaled vector sum
*/
static void VScaleSum_OpenMPDEV(realtype c, N_Vector x, N_Vector y, N_Vector z)
{
sunindextype i, N;
realtype *xd_dev, *yd_dev, *zd_dev;
int dev;
xd_dev = yd_dev = zd_dev = NULL;
N = NV_LENGTH_OMPDEV(x);
xd_dev = NV_DATA_DEV_OMPDEV(x);
yd_dev = NV_DATA_DEV_OMPDEV(y);
zd_dev = NV_DATA_DEV_OMPDEV(z);
/* get default device identifier */
dev = omp_get_default_device();
#pragma omp target is_device_ptr(xd_dev, yd_dev, zd_dev) device(dev)
#pragma omp teams distribute parallel for schedule(static, 1)
for (i = 0; i < N; i++)
zd_dev[i] = c*(xd_dev[i]+yd_dev[i]);
return;
}
/* ----------------------------------------------------------------------------
* Compute scaled vector difference
*/
static void VScaleDiff_OpenMPDEV(realtype c, N_Vector x, N_Vector y, N_Vector z)
{
sunindextype i, N;
realtype *xd_dev, *yd_dev, *zd_dev;
int dev;
xd_dev = yd_dev = zd_dev = NULL;
N = NV_LENGTH_OMPDEV(x);
xd_dev = NV_DATA_DEV_OMPDEV(x);
yd_dev = NV_DATA_DEV_OMPDEV(y);
zd_dev = NV_DATA_DEV_OMPDEV(z);
/* get default device identifier */
dev = omp_get_default_device();
#pragma omp target is_device_ptr(xd_dev, yd_dev, zd_dev) device(dev)
#pragma omp teams distribute parallel for schedule(static, 1)
for (i = 0; i < N; i++)
zd_dev[i] = c*(xd_dev[i]-yd_dev[i]);
return;
}
/* ----------------------------------------------------------------------------
* Compute vector sum z[i] = a*x[i]+y[i]
*/
static void VLin1_OpenMPDEV(realtype a, N_Vector x, N_Vector y, N_Vector z)
{
sunindextype i, N;
realtype *xd_dev, *yd_dev, *zd_dev;
int dev;
xd_dev = yd_dev = zd_dev = NULL;
N = NV_LENGTH_OMPDEV(x);
xd_dev = NV_DATA_DEV_OMPDEV(x);
yd_dev = NV_DATA_DEV_OMPDEV(y);
zd_dev = NV_DATA_DEV_OMPDEV(z);
/* get default device identifier */
dev = omp_get_default_device();
#pragma omp target is_device_ptr(xd_dev, yd_dev, zd_dev) device(dev)
#pragma omp teams distribute parallel for schedule(static, 1)
for (i = 0; i < N; i++)
zd_dev[i] = (a*xd_dev[i])+yd_dev[i];
return;
}
/* ----------------------------------------------------------------------------
* Compute vector difference z[i] = a*x[i]-y[i]
*/
static void VLin2_OpenMPDEV(realtype a, N_Vector x, N_Vector y, N_Vector z)
{
sunindextype i, N;
realtype *xd_dev, *yd_dev, *zd_dev;
int dev;
xd_dev = yd_dev = zd_dev = NULL;
N = NV_LENGTH_OMPDEV(x);
xd_dev = NV_DATA_DEV_OMPDEV(x);
yd_dev = NV_DATA_DEV_OMPDEV(y);
zd_dev = NV_DATA_DEV_OMPDEV(z);
/* get default device identifier */
dev = omp_get_default_device();
#pragma omp target is_device_ptr(xd_dev, yd_dev, zd_dev) device(dev)
#pragma omp teams distribute parallel for schedule(static, 1)
for (i = 0; i < N; i++)
zd_dev[i] = (a*xd_dev[i])-yd_dev[i];
return;
}
/* ----------------------------------------------------------------------------
* Compute special cases of linear sum
*/
static void Vaxpy_OpenMPDEV(realtype a, N_Vector x, N_Vector y)
{
sunindextype i, N;
realtype *xd_dev, *yd_dev;
int dev;
xd_dev = yd_dev = NULL;
N = NV_LENGTH_OMPDEV(x);
xd_dev = NV_DATA_DEV_OMPDEV(x);
yd_dev = NV_DATA_DEV_OMPDEV(y);
/* get default device identifier */
dev = omp_get_default_device();
if (a == ONE) {
#pragma omp target is_device_ptr(xd_dev, yd_dev) device(dev)
#pragma omp teams distribute parallel for schedule(static, 1)
for (i = 0; i < N; i++)
yd_dev[i] += xd_dev[i];
return;
}
if (a == -ONE) {
#pragma omp target is_device_ptr(xd_dev, yd_dev) device(dev)
#pragma omp teams distribute parallel for schedule(static, 1)
for (i = 0; i < N; i++)
yd_dev[i] -= xd_dev[i];
return;
}
#pragma omp target is_device_ptr(xd_dev, yd_dev) device(dev)
#pragma omp teams distribute parallel for schedule(static, 1)
for (i = 0; i < N; i++)
yd_dev[i] += a*xd_dev[i];
return;
}
/* ----------------------------------------------------------------------------
* Compute scaled vector x[i] = a*x[i]
*/
static void VScaleBy_OpenMPDEV(realtype a, N_Vector x)
{
sunindextype i, N;
realtype *xd_dev;
int dev;
xd_dev = NULL;
N = NV_LENGTH_OMPDEV(x);
xd_dev = NV_DATA_DEV_OMPDEV(x);
/* get default device identifier */
dev = omp_get_default_device();
#pragma omp target is_device_ptr(xd_dev) device(dev)
#pragma omp teams distribute parallel for schedule(static, 1)
for (i = 0; i < N; i++)
xd_dev[i] *= a;
return;
}
/*
* -----------------------------------------------------------------
* private functions for special cases of vector array operations
* -----------------------------------------------------------------
*/
static int VSumVectorArray_OpenMPDEV(int nvec, N_Vector* X, N_Vector* Y, N_Vector* Z)
{
int i, dev;
sunindextype j, N;
realtype* xd_dev=NULL;
realtype* yd_dev=NULL;
realtype* zd_dev=NULL;
realtype** xd_dev_ptrs=NULL;
realtype** yd_dev_ptrs=NULL;
realtype** zd_dev_ptrs=NULL;
N = NV_LENGTH_OMPDEV(X[0]);
/* get default device identifier */
dev = omp_get_default_device();
/* Allocate and store dev pointers to copy to device */
xd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
yd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
zd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
for (i=0; i<nvec; i++)
xd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(X[i]);
for (i=0; i<nvec; i++)
yd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Y[i]);
for (i=0; i<nvec; i++)
zd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Z[i]);
#pragma omp target map(to:N,xd_dev_ptrs[:nvec],yd_dev_ptrs[:nvec],zd_dev_ptrs[:nvec]) \
is_device_ptr(xd_dev, yd_dev, zd_dev) device(dev)
#pragma omp teams distribute private(xd_dev, yd_dev, zd_dev)
for (i=0; i<nvec; i++) {
xd_dev = xd_dev_ptrs[i];
yd_dev = yd_dev_ptrs[i];
zd_dev = zd_dev_ptrs[i];
#pragma omp parallel for schedule(static, 1)
for (j=0; j<N; j++)
zd_dev[j] = xd_dev[j] + yd_dev[j];
}
free(xd_dev_ptrs);
free(yd_dev_ptrs);
free(zd_dev_ptrs);
return(0);
}
static int VDiffVectorArray_OpenMPDEV(int nvec, N_Vector* X, N_Vector* Y, N_Vector* Z)
{
int i, dev;
sunindextype j, N;
realtype* xd_dev=NULL;
realtype* yd_dev=NULL;
realtype* zd_dev=NULL;
realtype** xd_dev_ptrs=NULL;
realtype** yd_dev_ptrs=NULL;
realtype** zd_dev_ptrs=NULL;
N = NV_LENGTH_OMPDEV(X[0]);
/* get default device identifier */
dev = omp_get_default_device();
/* Allocate and store dev pointers to copy to device */
xd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
yd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
zd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
for (i=0; i<nvec; i++)
xd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(X[i]);
for (i=0; i<nvec; i++)
yd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Y[i]);
for (i=0; i<nvec; i++)
zd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Z[i]);
#pragma omp target map(to:N,xd_dev_ptrs[:nvec],yd_dev_ptrs[:nvec],zd_dev_ptrs[:nvec]) \
is_device_ptr(xd_dev, yd_dev, zd_dev) device(dev)
#pragma omp teams distribute private(xd_dev, yd_dev, zd_dev)
for (i=0; i<nvec; i++) {
xd_dev = xd_dev_ptrs[i];
yd_dev = yd_dev_ptrs[i];
zd_dev = zd_dev_ptrs[i];
#pragma omp parallel for schedule(static, 1)
for (j=0; j<N; j++)
zd_dev[j] = xd_dev[j] - yd_dev[j];
}
free(xd_dev_ptrs);
free(yd_dev_ptrs);
free(zd_dev_ptrs);
return(0);
}
static int VScaleSumVectorArray_OpenMPDEV(int nvec, realtype c, N_Vector* X, N_Vector* Y, N_Vector* Z)
{
int i, dev;
sunindextype j, N;
realtype* xd_dev=NULL;
realtype* yd_dev=NULL;
realtype* zd_dev=NULL;
realtype** xd_dev_ptrs=NULL;
realtype** yd_dev_ptrs=NULL;
realtype** zd_dev_ptrs=NULL;
N = NV_LENGTH_OMPDEV(X[0]);
/* get default device identifier */
dev = omp_get_default_device();
/* Allocate and store dev pointers to copy to device */
xd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
yd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
zd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
for (i=0; i<nvec; i++)
xd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(X[i]);
for (i=0; i<nvec; i++)
yd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Y[i]);
for (i=0; i<nvec; i++)
zd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Z[i]);
#pragma omp target map(to:N,xd_dev_ptrs[:nvec],yd_dev_ptrs[:nvec],zd_dev_ptrs[:nvec]) \
is_device_ptr(xd_dev, yd_dev, zd_dev) device(dev)
#pragma omp teams distribute private(xd_dev, yd_dev, zd_dev)
for (i=0; i<nvec; i++) {
xd_dev = xd_dev_ptrs[i];
yd_dev = yd_dev_ptrs[i];
zd_dev = zd_dev_ptrs[i];
#pragma omp parallel for schedule(static, 1)
for (j=0; j<N; j++)
zd_dev[j] = c * (xd_dev[j] + yd_dev[j]);
}
free(xd_dev_ptrs);
free(yd_dev_ptrs);
free(zd_dev_ptrs);
return(0);
}
static int VScaleDiffVectorArray_OpenMPDEV(int nvec, realtype c, N_Vector* X, N_Vector* Y, N_Vector* Z)
{
int i, dev;
sunindextype j, N;
realtype* xd_dev=NULL;
realtype* yd_dev=NULL;
realtype* zd_dev=NULL;
realtype** xd_dev_ptrs=NULL;
realtype** yd_dev_ptrs=NULL;
realtype** zd_dev_ptrs=NULL;
N = NV_LENGTH_OMPDEV(X[0]);
/* get default device identifier */
dev = omp_get_default_device();
/* Allocate and store dev pointers to copy to device */
xd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
yd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
zd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
for (i=0; i<nvec; i++)
xd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(X[i]);
for (i=0; i<nvec; i++)
yd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Y[i]);
for (i=0; i<nvec; i++)
zd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Z[i]);
#pragma omp target map(to:N,xd_dev_ptrs[:nvec],yd_dev_ptrs[:nvec],zd_dev_ptrs[:nvec]) \
is_device_ptr(xd_dev, yd_dev, zd_dev) device(dev)
#pragma omp teams distribute private(xd_dev, yd_dev, zd_dev)
for (i=0; i<nvec; i++) {
xd_dev = xd_dev_ptrs[i];
yd_dev = yd_dev_ptrs[i];
zd_dev = zd_dev_ptrs[i];
#pragma omp parallel for schedule(static, 1)
for (j=0; j<N; j++)
zd_dev[j] = c * (xd_dev[j] - yd_dev[j]);
}
free(xd_dev_ptrs);
free(yd_dev_ptrs);
free(zd_dev_ptrs);
return(0);
}
static int VLin1VectorArray_OpenMPDEV(int nvec, realtype a, N_Vector* X, N_Vector* Y, N_Vector* Z)
{
int i, dev;
sunindextype j, N;
realtype* xd_dev=NULL;
realtype* yd_dev=NULL;
realtype* zd_dev=NULL;
realtype** xd_dev_ptrs=NULL;
realtype** yd_dev_ptrs=NULL;
realtype** zd_dev_ptrs=NULL;
N = NV_LENGTH_OMPDEV(X[0]);
/* get default device identifier */
dev = omp_get_default_device();
/* Allocate and store dev pointers to copy to device */
xd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
yd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
zd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
for (i=0; i<nvec; i++)
xd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(X[i]);
for (i=0; i<nvec; i++)
yd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Y[i]);
for (i=0; i<nvec; i++)
zd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Z[i]);
#pragma omp target map(to:N,xd_dev_ptrs[:nvec],yd_dev_ptrs[:nvec],zd_dev_ptrs[:nvec]) \
is_device_ptr(xd_dev, yd_dev, zd_dev) device(dev)
#pragma omp teams distribute private(xd_dev, yd_dev, zd_dev)
for (i=0; i<nvec; i++) {
xd_dev = xd_dev_ptrs[i];
yd_dev = yd_dev_ptrs[i];
zd_dev = zd_dev_ptrs[i];
#pragma omp parallel for schedule(static, 1)
for (j=0; j<N; j++)
zd_dev[j] = (a * xd_dev[j]) + yd_dev[j];
}
free(xd_dev_ptrs);
free(yd_dev_ptrs);
free(zd_dev_ptrs);
return(0);
}
static int VLin2VectorArray_OpenMPDEV(int nvec, realtype a, N_Vector* X, N_Vector* Y, N_Vector* Z)
{
int i, dev;
sunindextype j, N;
realtype* xd_dev=NULL;
realtype* yd_dev=NULL;
realtype* zd_dev=NULL;
realtype** xd_dev_ptrs=NULL;
realtype** yd_dev_ptrs=NULL;
realtype** zd_dev_ptrs=NULL;
N = NV_LENGTH_OMPDEV(X[0]);
/* get default device identifier */
dev = omp_get_default_device();
/* Allocate and store dev pointers to copy to device */
xd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
yd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
zd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
for (i=0; i<nvec; i++)
xd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(X[i]);
for (i=0; i<nvec; i++)
yd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Y[i]);
for (i=0; i<nvec; i++)
zd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Z[i]);
#pragma omp target map(to:N,xd_dev_ptrs[:nvec],yd_dev_ptrs[:nvec],zd_dev_ptrs[:nvec]) \
is_device_ptr(xd_dev, yd_dev, zd_dev) device(dev)
#pragma omp teams distribute private(xd_dev, yd_dev, zd_dev)
for (i=0; i<nvec; i++) {
xd_dev = xd_dev_ptrs[i];
yd_dev = yd_dev_ptrs[i];
zd_dev = zd_dev_ptrs[i];
#pragma omp parallel for schedule(static, 1)
for (j=0; j<N; j++)
zd_dev[j] = (a * xd_dev[j]) - yd_dev[j];
}
free(xd_dev_ptrs);
free(yd_dev_ptrs);
free(zd_dev_ptrs);
return(0);
}
static int VaxpyVectorArray_OpenMPDEV(int nvec, realtype a, N_Vector* X, N_Vector* Y)
{
int i, dev;
sunindextype j, N;
realtype* xd_dev=NULL;
realtype* yd_dev=NULL;
realtype** xd_dev_ptrs=NULL;
realtype** yd_dev_ptrs=NULL;
N = NV_LENGTH_OMPDEV(X[0]);
/* get default device identifier */
dev = omp_get_default_device();
/* Allocate and store dev pointers to copy to device */
xd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
yd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
for (i=0; i<nvec; i++)
xd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(X[i]);
for (i=0; i<nvec; i++)
yd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Y[i]);
if (a == ONE) {
#pragma omp target map(to:N,xd_dev_ptrs[:nvec],yd_dev_ptrs[:nvec]) \
is_device_ptr(xd_dev, yd_dev) device(dev)
#pragma omp teams distribute private(xd_dev, yd_dev)
for (i=0; i<nvec; i++) {
xd_dev = xd_dev_ptrs[i];
yd_dev = yd_dev_ptrs[i];
#pragma omp parallel for schedule(static, 1)
for (j=0; j<N; j++)
yd_dev[j] += xd_dev[j];
}
free(xd_dev_ptrs);
free(yd_dev_ptrs);
return(0);
}
if (a == -ONE) {
#pragma omp target map(to:N,xd_dev_ptrs[:nvec],yd_dev_ptrs[:nvec]) \
is_device_ptr(xd_dev, yd_dev) device(dev)
#pragma omp teams distribute private(xd_dev, yd_dev)
for (i=0; i<nvec; i++) {
xd_dev = xd_dev_ptrs[i];
yd_dev = yd_dev_ptrs[i];
#pragma omp parallel for schedule(static, 1)
for (j=0; j<N; j++)
yd_dev[j] -= xd_dev[j];
}
free(xd_dev_ptrs);
free(yd_dev_ptrs);
return(0);
}
#pragma omp target map(to:N,xd_dev_ptrs[:nvec],yd_dev_ptrs[:nvec]) \
is_device_ptr(xd_dev, yd_dev) device(dev)
#pragma omp teams distribute private(xd_dev, yd_dev)
for (i=0; i<nvec; i++) {
xd_dev = xd_dev_ptrs[i];
yd_dev = yd_dev_ptrs[i];
#pragma omp parallel for schedule(static, 1)
for (j=0; j<N; j++)
yd_dev[j] += a * xd_dev[j];
}
free(xd_dev_ptrs);
free(yd_dev_ptrs);
return(0);
}
/*
* -----------------------------------------------------------------
* Enable / Disable fused and vector array operations
* -----------------------------------------------------------------
*/
int N_VEnableFusedOps_OpenMPDEV(N_Vector v, booleantype tf)
{
/* check that vector is non-NULL */
if (v == NULL) return(-1);
/* check that ops structure is non-NULL */
if (v->ops == NULL) return(-1);
if (tf) {
/* enable all fused vector operations */
v->ops->nvlinearcombination = N_VLinearCombination_OpenMPDEV;
v->ops->nvscaleaddmulti = N_VScaleAddMulti_OpenMPDEV;
v->ops->nvdotprodmulti = N_VDotProdMulti_OpenMPDEV;
/* enable all vector array operations */
v->ops->nvlinearsumvectorarray = N_VLinearSumVectorArray_OpenMPDEV;
v->ops->nvscalevectorarray = N_VScaleVectorArray_OpenMPDEV;
v->ops->nvconstvectorarray = N_VConstVectorArray_OpenMPDEV;
v->ops->nvwrmsnormvectorarray = N_VWrmsNormVectorArray_OpenMPDEV;
v->ops->nvwrmsnormmaskvectorarray = N_VWrmsNormMaskVectorArray_OpenMPDEV;
v->ops->nvscaleaddmultivectorarray = N_VScaleAddMultiVectorArray_OpenMPDEV;
v->ops->nvlinearcombinationvectorarray = N_VLinearCombinationVectorArray_OpenMPDEV;
} else {
/* disable all fused vector operations */
v->ops->nvlinearcombination = NULL;
v->ops->nvscaleaddmulti = NULL;
v->ops->nvdotprodmulti = NULL;
/* disable all vector array operations */
v->ops->nvlinearsumvectorarray = NULL;
v->ops->nvscalevectorarray = NULL;
v->ops->nvconstvectorarray = NULL;
v->ops->nvwrmsnormvectorarray = NULL;
v->ops->nvwrmsnormmaskvectorarray = NULL;
v->ops->nvscaleaddmultivectorarray = NULL;
v->ops->nvlinearcombinationvectorarray = NULL;
}
/* return success */
return(0);
}
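/* Editor's sketch of typical usage (hedged: assumes a vector created with the
standard constructor and SUNTRUE from the SUNDIALS headers; error handling
abbreviated):

N_Vector v = N_VNew_OpenMPDEV(length);
if (N_VEnableFusedOps_OpenMPDEV(v, SUNTRUE) != 0) { ... handle error ... }

Note the enable/disable functions below only swap function pointers in
v->ops; they allocate nothing, so they are cheap to call at any time. */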
int N_VEnableLinearCombination_OpenMPDEV(N_Vector v, booleantype tf)
{
/* check that vector is non-NULL */
if (v == NULL) return(-1);
/* check that ops structure is non-NULL */
if (v->ops == NULL) return(-1);
/* enable/disable operation */
if (tf)
v->ops->nvlinearcombination = N_VLinearCombination_OpenMPDEV;
else
v->ops->nvlinearcombination = NULL;
/* return success */
return(0);
}
int N_VEnableScaleAddMulti_OpenMPDEV(N_Vector v, booleantype tf)
{
/* check that vector is non-NULL */
if (v == NULL) return(-1);
/* check that ops structure is non-NULL */
if (v->ops == NULL) return(-1);
/* enable/disable operation */
if (tf)
v->ops->nvscaleaddmulti = N_VScaleAddMulti_OpenMPDEV;
else
v->ops->nvscaleaddmulti = NULL;
/* return success */
return(0);
}
int N_VEnableDotProdMulti_OpenMPDEV(N_Vector v, booleantype tf)
{
/* check that vector is non-NULL */
if (v == NULL) return(-1);
/* check that ops structure is non-NULL */
if (v->ops == NULL) return(-1);
/* enable/disable operation */
if (tf)
v->ops->nvdotprodmulti = N_VDotProdMulti_OpenMPDEV;
else
v->ops->nvdotprodmulti = NULL;
/* return success */
return(0);
}
int N_VEnableLinearSumVectorArray_OpenMPDEV(N_Vector v, booleantype tf)
{
/* check that vector is non-NULL */
if (v == NULL) return(-1);
/* check that ops structure is non-NULL */
if (v->ops == NULL) return(-1);
/* enable/disable operation */
if (tf)
v->ops->nvlinearsumvectorarray = N_VLinearSumVectorArray_OpenMPDEV;
else
v->ops->nvlinearsumvectorarray = NULL;
/* return success */
return(0);
}
int N_VEnableScaleVectorArray_OpenMPDEV(N_Vector v, booleantype tf)
{
/* check that vector is non-NULL */
if (v == NULL) return(-1);
/* check that ops structure is non-NULL */
if (v->ops == NULL) return(-1);
/* enable/disable operation */
if (tf)
v->ops->nvscalevectorarray = N_VScaleVectorArray_OpenMPDEV;
else
v->ops->nvscalevectorarray = NULL;
/* return success */
return(0);
}
int N_VEnableConstVectorArray_OpenMPDEV(N_Vector v, booleantype tf)
{
/* check that vector is non-NULL */
if (v == NULL) return(-1);
/* check that ops structure is non-NULL */
if (v->ops == NULL) return(-1);
/* enable/disable operation */
if (tf)
v->ops->nvconstvectorarray = N_VConstVectorArray_OpenMPDEV;
else
v->ops->nvconstvectorarray = NULL;
/* return success */
return(0);
}
int N_VEnableWrmsNormVectorArray_OpenMPDEV(N_Vector v, booleantype tf)
{
/* check that vector is non-NULL */
if (v == NULL) return(-1);
/* check that ops structure is non-NULL */
if (v->ops == NULL) return(-1);
/* enable/disable operation */
if (tf)
v->ops->nvwrmsnormvectorarray = N_VWrmsNormVectorArray_OpenMPDEV;
else
v->ops->nvwrmsnormvectorarray = NULL;
/* return success */
return(0);
}
int N_VEnableWrmsNormMaskVectorArray_OpenMPDEV(N_Vector v, booleantype tf)
{
/* check that vector is non-NULL */
if (v == NULL) return(-1);
/* check that ops structure is non-NULL */
if (v->ops == NULL) return(-1);
/* enable/disable operation */
if (tf)
v->ops->nvwrmsnormmaskvectorarray = N_VWrmsNormMaskVectorArray_OpenMPDEV;
else
v->ops->nvwrmsnormmaskvectorarray = NULL;
/* return success */
return(0);
}
int N_VEnableScaleAddMultiVectorArray_OpenMPDEV(N_Vector v, booleantype tf)
{
/* check that vector is non-NULL */
if (v == NULL) return(-1);
/* check that ops structure is non-NULL */
if (v->ops == NULL) return(-1);
/* enable/disable operation */
if (tf)
v->ops->nvscaleaddmultivectorarray = N_VScaleAddMultiVectorArray_OpenMPDEV;
else
v->ops->nvscaleaddmultivectorarray = NULL;
/* return success */
return(0);
}
int N_VEnableLinearCombinationVectorArray_OpenMPDEV(N_Vector v, booleantype tf)
{
/* check that vector is non-NULL */
if (v == NULL) return(-1);
/* check that ops structure is non-NULL */
if (v->ops == NULL) return(-1);
/* enable/disable operation */
if (tf)
v->ops->nvlinearcombinationvectorarray = N_VLinearCombinationVectorArray_OpenMPDEV;
else
v->ops->nvlinearcombinationvectorarray = NULL;
/* return success */
return(0);
}
|
omp-sumof-elements-reduction.c |
/**************************************************************************
Example 1.5 : omp-sumof-elements-reduction.c
Objective : Write an OpenMP program to print the sum of the elements of an array.
This example demonstrates the use of the
PARALLEL FOR directive and the reduction clause.
In a reduction, we repeatedly apply a binary operator to a variable and some
other value, and store the result back in the variable.
Input : Size of the array
Number of threads
Output : The master thread prints the sum of the elements of the array.
Created : Aug 2011
Author : RarchK
*********************************************************************************/
#include<stdio.h>
#include<omp.h>
#include<stdlib.h>
#include<math.h> /* fabsf(), used by the tolerance-based result check below */
/* Main Program */
int main(int argc, char **argv)
{
float *array_A, sum, *checkarray, serialsum;
int arraysize, i, Noofthreads;
printf("\n\t\t---------------------------------------------------------------------------");
printf("\n\t\t Email : RarchK");
printf("\n\t\t---------------------------------------------------------------------------");
printf("\n\t\t Objective : Find the Sum of elements of one-dimensional real array ");
printf("\n\t\t using OpenMP Parallel for directive and Reduction Clause ");
printf("\n\t\t..........................................................................\n");
/* Checking for command line arguments */
if (argc != 3) {
printf("\t\t Too few arguments\n");
printf("\t\t Syntax : exec <Threads> <array-size> \n");
exit(-1);
}
Noofthreads=atoi(argv[1]);
if ((Noofthreads!=1) && (Noofthreads!=2) && (Noofthreads!=4) && (Noofthreads!=8) && (Noofthreads!= 16) ) {
printf("\n\t\t Number of threads should be 1,2,4,8 or 16 for the execution of program. \n\n");
exit(-1);
}
arraysize=atoi(argv[2]);
/*printf("\n\t\t Enter the size of the array \n");
scanf("%d", &arraysize);*/
if (arraysize <= 0) {
printf("\n\t\t Positive Number Required\n");
exit(1);
}
printf("\n\t\t Threads : %d ",Noofthreads);
printf("\n\t\t Array Size : %d",arraysize);
/* Dynamic Memory Allocation */
array_A = (float *) malloc(sizeof(float) * arraysize);
checkarray = (float *) malloc(sizeof(float) * arraysize);
/* Array Elements Initialization */
for (i = 0; i < arraysize; i++) {
array_A[i] = i + 5;
checkarray[i] = array_A[i];
}
sum = 0.0;
/* Set the number of threads */
omp_set_num_threads(Noofthreads);
/* OpenMP parallel for with reduction clause:
In a reduction, we repeatedly apply a binary operator to a variable and some
other value, and store the result back in the variable.
Here the reduction applies to the sum variable: each thread calculates a
partial sum of the array elements, and the partial sums are combined into
the shared sum variable at the end of the loop.
*/
#pragma omp parallel for reduction(+ : sum)
for (i = 0; i < arraysize; i++)
{
sum = sum + array_A[i];
}
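/* Editor's note: the reduction clause above is roughly equivalent to this
hand-written pattern (sketch only; 'partial' is a name introduced here for
illustration):

float partial;
#pragma omp parallel private(partial)
{
partial = 0.0f;
#pragma omp for
for (i = 0; i < arraysize; i++)
partial += array_A[i];
#pragma omp critical
sum += partial;
}
*/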
/* Serial Calculation */
serialsum = 0.0;
for (i = 0; i < arraysize; i++)
serialsum = serialsum +checkarray[i];
/* Output checking: the parallel reduction adds the elements in a different
order than the serial loop, so exact floating-point equality can fail for
large arrays; compare with a small relative tolerance instead */
if (fabsf(serialsum - sum) > 1e-3f * fabsf(serialsum)) {
printf("\n\n\t\t The parallel calculation of array sum is different from the serial calculation \n");
exit(-1);
} else
printf("\n\n\t\t The parallel calculation of array sum matches the serial calculation \n");
/* Freeing Memory Which Was Allocated */
free(checkarray);
free(array_A);
printf("\n\t\t The SumOfElements Of The Array Using OpenMP Directives Is %f\n", sum);
printf("\t\t The SumOfElements Of The Array By Serial Calculation Is %f\n\n", serialsum);
return 0;
}
|
GB_binop__lxor_int8.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__lxor_int8)
// A.*B function (eWiseMult): GB (_AemultB_08__lxor_int8)
// A.*B function (eWiseMult): GB (_AemultB_02__lxor_int8)
// A.*B function (eWiseMult): GB (_AemultB_04__lxor_int8)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__lxor_int8)
// A*D function (colscale): GB (_AxD__lxor_int8)
// D*A function (rowscale): GB (_DxB__lxor_int8)
// C+=B function (dense accum): GB (_Cdense_accumB__lxor_int8)
// C+=b function (dense accum): GB (_Cdense_accumb__lxor_int8)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__lxor_int8)
// C=scalar+B GB (_bind1st__lxor_int8)
// C=scalar+B' GB (_bind1st_tran__lxor_int8)
// C=A+scalar GB (_bind2nd__lxor_int8)
// C=A'+scalar GB (_bind2nd_tran__lxor_int8)
// C type: int8_t
// A type: int8_t
// A pattern? 0
// B type: int8_t
// B pattern? 0
// BinaryOp: cij = ((aij != 0) != (bij != 0))
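// e.g., with int8 inputs treated as booleans (any nonzero value is true):
// lxor(0,0) = 0, lxor(3,0) = 1, lxor(0,-5) = 1, lxor(3,-5) = 0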
#define GB_ATYPE \
int8_t
#define GB_BTYPE \
int8_t
#define GB_CTYPE \
int8_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int8_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int8_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int8_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = ((x != 0) != (y != 0)) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LXOR || GxB_NO_INT8 || GxB_NO_LXOR_INT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
void GB (_Cdense_ewise3_noaccum__lxor_int8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__lxor_int8)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__lxor_int8)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int8_t
int8_t bwork = (*((int8_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__lxor_int8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *restrict Cx = (int8_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__lxor_int8)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *restrict Cx = (int8_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__lxor_int8)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
int8_t alpha_scalar ;
int8_t beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((int8_t *) alpha_scalar_in)) ;
beta_scalar = (*((int8_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__lxor_int8)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__lxor_int8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__lxor_int8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__lxor_int8)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__lxor_int8)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *Cx = (int8_t *) Cx_output ;
int8_t x = (*((int8_t *) x_input)) ;
int8_t *Bx = (int8_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
int8_t bij = GBX (Bx, p, false) ;
Cx [p] = ((x != 0) != (bij != 0)) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__lxor_int8)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
int8_t *Cx = (int8_t *) Cx_output ;
int8_t *Ax = (int8_t *) Ax_input ;
int8_t y = (*((int8_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
int8_t aij = GBX (Ax, p, false) ;
Cx [p] = ((aij != 0) != (y != 0)) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = ((x != 0) != (aij != 0)) ; \
}
GrB_Info GB (_bind1st_tran__lxor_int8)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t x = (*((const int8_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = ((aij != 0) != (y != 0)) ; \
}
GrB_Info GB (_bind2nd_tran__lxor_int8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t y = (*((const int8_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
ast-dump-openmp-for-simd.c | // RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -ast-dump %s | FileCheck --match-full-lines -implicit-check-not=openmp_structured_block %s
void test_one(int x) {
#pragma omp for simd
for (int i = 0; i < x; i++)
;
}
void test_two(int x, int y) {
#pragma omp for simd
for (int i = 0; i < x; i++)
for (int i = 0; i < y; i++)
;
}
void test_three(int x, int y) {
#pragma omp for simd collapse(1)
for (int i = 0; i < x; i++)
for (int i = 0; i < y; i++)
;
}
void test_four(int x, int y) {
#pragma omp for simd collapse(2)
for (int i = 0; i < x; i++)
for (int i = 0; i < y; i++)
;
}
void test_five(int x, int y, int z) {
#pragma omp for simd collapse(2)
for (int i = 0; i < x; i++)
for (int i = 0; i < y; i++)
for (int i = 0; i < z; i++)
;
}
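// Editor's note: the CHECK lines below pin down the AST Clang builds for each
// '#pragma omp for simd'. The marker in the dump shows which statement Clang
// treats as the directive's structured block; the collapse() clause changes
// how many loops are associated with the directive, and therefore where that
// marker lands.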
// CHECK: TranslationUnitDecl {{.*}} <<invalid sloc>> <invalid sloc>
// CHECK: |-FunctionDecl {{.*}} <{{.*}}ast-dump-openmp-for-simd.c:3:1, line:7:1> line:3:6 test_one 'void (int)'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:15, col:19> col:19 used x 'int'
// CHECK-NEXT: | `-CompoundStmt {{.*}} <col:22, line:7:1>
// CHECK-NEXT: | `-OMPForSimdDirective {{.*}} <line:4:1, col:21>
// CHECK-NEXT: | `-CapturedStmt {{.*}} <line:5:3, line:6:5>
// CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc>
// CHECK-NEXT: | | |-ForStmt {{.*}} <line:5:3, line:6:5>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:5:8, col:17>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-NullStmt {{.*}} <line:6:5> openmp_structured_block
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-for-simd.c:4:1) *const restrict'
// CHECK-NEXT: | | `-VarDecl {{.*}} <line:5:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: |-FunctionDecl {{.*}} <line:9:1, line:14:1> line:9:6 test_two 'void (int, int)'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:15, col:19> col:19 used x 'int'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:22, col:26> col:26 used y 'int'
// CHECK-NEXT: | `-CompoundStmt {{.*}} <col:29, line:14:1>
// CHECK-NEXT: | `-OMPForSimdDirective {{.*}} <line:10:1, col:21>
// CHECK-NEXT: | `-CapturedStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc>
// CHECK-NEXT: | | |-ForStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:11:8, col:17>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ForStmt {{.*}} <line:12:5, line:13:7> openmp_structured_block
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:12:10, col:19>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-NullStmt {{.*}} <line:13:7>
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-for-simd.c:10:1) *const restrict'
// CHECK-NEXT: | | |-VarDecl {{.*}} <line:11:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | `-VarDecl {{.*}} <line:12:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:11:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: |-FunctionDecl {{.*}} <line:16:1, line:21:1> line:16:6 test_three 'void (int, int)'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:17, col:21> col:21 used x 'int'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:24, col:28> col:28 used y 'int'
// CHECK-NEXT: | `-CompoundStmt {{.*}} <col:31, line:21:1>
// CHECK-NEXT: | `-OMPForSimdDirective {{.*}} <line:17:1, col:33>
// CHECK-NEXT: | |-OMPCollapseClause {{.*}} <col:22, col:32>
// CHECK-NEXT: | | `-ConstantExpr {{.*}} <col:31> 'int'
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:31> 'int' 1
// CHECK-NEXT: | `-CapturedStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc>
// CHECK-NEXT: | | |-ForStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:18:8, col:17>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ForStmt {{.*}} <line:19:5, line:20:7> openmp_structured_block
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:19:10, col:19>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-NullStmt {{.*}} <line:20:7>
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-for-simd.c:17:1) *const restrict'
// CHECK-NEXT: | | |-VarDecl {{.*}} <line:18:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | `-VarDecl {{.*}} <line:19:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:18:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: |-FunctionDecl {{.*}} <line:23:1, line:28:1> line:23:6 test_four 'void (int, int)'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:16, col:20> col:20 used x 'int'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:23, col:27> col:27 used y 'int'
// CHECK-NEXT: | `-CompoundStmt {{.*}} <col:30, line:28:1>
// CHECK-NEXT: | `-OMPForSimdDirective {{.*}} <line:24:1, col:33>
// CHECK-NEXT: | |-OMPCollapseClause {{.*}} <col:22, col:32>
// CHECK-NEXT: | | `-ConstantExpr {{.*}} <col:31> 'int'
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:31> 'int' 2
// CHECK-NEXT: | `-CapturedStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc>
// CHECK-NEXT: | | |-ForStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:25:8, col:17>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ForStmt {{.*}} <line:26:5, line:27:7>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:26:10, col:19>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-NullStmt {{.*}} <line:27:7> openmp_structured_block
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-for-simd.c:24:1) *const restrict'
// CHECK-NEXT: | | |-VarDecl {{.*}} <line:25:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | `-VarDecl {{.*}} <line:26:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:25:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:26:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: `-FunctionDecl {{.*}} <line:30:1, line:36:1> line:30:6 test_five 'void (int, int, int)'
// CHECK-NEXT: |-ParmVarDecl {{.*}} <col:16, col:20> col:20 used x 'int'
// CHECK-NEXT: |-ParmVarDecl {{.*}} <col:23, col:27> col:27 used y 'int'
// CHECK-NEXT: |-ParmVarDecl {{.*}} <col:30, col:34> col:34 used z 'int'
// CHECK-NEXT: `-CompoundStmt {{.*}} <col:37, line:36:1>
// CHECK-NEXT: `-OMPForSimdDirective {{.*}} <line:31:1, col:33>
// CHECK-NEXT: |-OMPCollapseClause {{.*}} <col:22, col:32>
// CHECK-NEXT: | `-ConstantExpr {{.*}} <col:31> 'int'
// CHECK-NEXT: | `-IntegerLiteral {{.*}} <col:31> 'int' 2
// CHECK-NEXT: `-CapturedStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc>
// CHECK-NEXT: | |-ForStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: | | |-DeclStmt {{.*}} <line:32:8, col:17>
// CHECK-NEXT: | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | |-<<<NULL>>>
// CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | `-ForStmt {{.*}} <line:33:5, line:35:9>
// CHECK-NEXT: | | |-DeclStmt {{.*}} <line:33:10, col:19>
// CHECK-NEXT: | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | |-<<<NULL>>>
// CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | `-ForStmt {{.*}} <line:34:7, line:35:9> openmp_structured_block
// CHECK-NEXT: | | |-DeclStmt {{.*}} <line:34:12, col:21>
// CHECK-NEXT: | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | |-<<<NULL>>>
// CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<'
// CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue>
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | `-NullStmt {{.*}} <line:35:9>
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-for-simd.c:31:1) *const restrict'
// CHECK-NEXT: | |-VarDecl {{.*}} <line:32:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | |-VarDecl {{.*}} <line:33:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | `-VarDecl {{.*}} <line:34:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: |-DeclRefExpr {{.*}} <line:32:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: |-DeclRefExpr {{.*}} <line:33:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
|
trsm_x_bsr_n_hi_row.c | #include "alphasparse/opt.h"
#include "alphasparse/kernel.h"
#include "alphasparse/util.h"
#include <memory.h>
alphasparse_status_t ONAME(const ALPHA_Number alpha, const ALPHA_SPMAT_BSR *A, const ALPHA_Number *x, const ALPHA_INT columns, const ALPHA_INT ldx, ALPHA_Number *y, const ALPHA_INT ldy)
{
const ALPHA_INT num_thread = alpha_get_thread_num();
const ALPHA_INT bs = A->block_size;
ALPHA_Number* diag=(ALPHA_Number*) alpha_malloc(A->rows*bs*sizeof(ALPHA_Number));
const ALPHA_INT m = A->rows*bs;
const ALPHA_INT n = A->cols*bs;
// assert(m==n);
memset(diag, '\0', m * sizeof(ALPHA_Number));
const ALPHA_INT bs2 = bs * bs;
const ALPHA_INT b_rows = m / bs;
const ALPHA_INT b_cols = n / bs;
const alphasparse_layout_t block_layout = A->block_layout;
if(block_layout != ALPHA_SPARSE_LAYOUT_ROW_MAJOR)
{
printf("layout not consistent!!!\n");
exit(-1);
}
#ifdef _OPENMP
#pragma omp parallel for num_threads(num_thread)
#endif
for(ALPHA_INT br = 0 ; br < b_rows; br++){
for(ALPHA_INT ai = A->rows_start[br]; ai < A->rows_end[br]; ai++){
ALPHA_INT bc = A->col_indx[ai];
if(bc == br){
for(ALPHA_INT b_row = 0 ; b_row < bs ; b_row++){
diag[index2(br,b_row,bs)] = A->values[ai * bs2 + b_row *(bs + 1)];
}
}
}
}
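    // Backward-substitution sketch of what follows: for each right-hand-side
    // column, solve the upper-triangular block system U*y = alpha*x from the
    // last block row upwards. temp accumulates the off-diagonal contributions
    // U(br,bc)*y for bc > br so that, element-wise within the block,
    //   y[br] = (alpha * x[br] - temp) / diag[br]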
#ifdef _OPENMP
#pragma omp parallel for num_threads(num_thread)
#endif
for(ALPHA_INT out_y_col = 0; out_y_col < columns; out_y_col++)
{
ALPHA_Number* temp = (ALPHA_Number*) alpha_malloc(bs*sizeof(ALPHA_Number));
for (ALPHA_INT br = b_rows - 1; br >= 0; br--)
{
for(ALPHA_INT i = 0 ; i < bs ; i++){
alpha_setzero(temp[i]);
}
ALPHA_INT diagBlock = -1;
for (ALPHA_INT ai = A->rows_start[br]; ai < A->rows_end[br]; ai++)
{
ALPHA_INT bc = A->col_indx[ai];
if(bc > br)
//row-major
for(ALPHA_INT row = 0; row < bs; row++)
{
                    //all entries belong to the upper triangle
ALPHA_INT a0_offset = ai * bs2 + row * bs;
for(ALPHA_INT col = 0 ; col < bs ; col++)
{
ALPHA_INT y_offset = (bc * bs + col) * ldy + out_y_col;
ALPHA_INT ele_offset = a0_offset + col;
                        alpha_madde(temp[row], A->values[ele_offset], y[y_offset]);
}
}
            //the diagonal must be a non-zero block
if( bc==br ){
diagBlock = ai;
}
}
if(diagBlock == -1)
{
printf("lhs matrix invalid for trsm!!!\n");
exit(-1);
}
//row-major
        //backward substitution starts from the bottom row of the block
for(ALPHA_INT row = bs - 1; row >=0 ; row--)
{
//upper triangle of block
for(ALPHA_INT col = row + 1 ; col < bs ; col++){
ALPHA_INT y_offset = (br * bs + col) * ldy + out_y_col;
                alpha_madde(temp[row], A->values[diagBlock * bs2 + row * bs + col], y[y_offset]);
}
ALPHA_Number t;
alpha_setzero(t);
            alpha_mul(t, alpha, x[(br * bs + row) * ldx + out_y_col]);
            alpha_sub(t, t, temp[row]);
            alpha_div(y[(br * bs + row) * ldy + out_y_col], t, diag[row + br * bs]);
}
}
alpha_free(temp);
}
alpha_free(diag);
return ALPHA_SPARSE_STATUS_SUCCESS;
}
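/* Usage sketch (hypothetical driver; names other than the kernel's parameters
 * are assumptions): given an upper-triangular BSR matrix U with row-major
 * blocks, an n-by-columns right-hand side x and a solution buffer y,
 *   ONAME(alpha, U, x, columns, ldx, y, ldy);
 * performs a column-wise solve of U * y = alpha * x.
 */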
|
imginputfileconn.h | /**
* DeepDetect
* Copyright (c) 2014 Emmanuel Benazera
* Author: Emmanuel Benazera <beniz@droidnik.fr>
*
* This file is part of deepdetect.
*
* deepdetect is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* deepdetect is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with deepdetect. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef IMGINPUTFILECONN_H
#define IMGINPUTFILECONN_H
#include "inputconnectorstrategy.h"
#include <opencv2/opencv.hpp>
#ifdef USE_CUDA_CV
#include <opencv2/cudaimgproc.hpp>
#endif
#if CV_VERSION_MAJOR >= 3
#define CV_LOAD_IMAGE_COLOR cv::IMREAD_COLOR
#define CV_LOAD_IMAGE_GRAYSCALE cv::IMREAD_GRAYSCALE
#define CV_LOAD_IMAGE_UNCHANGED cv::IMREAD_UNCHANGED
#define CV_BGR2RGB cv::COLOR_BGR2RGB
#define CV_BGR2GRAY cv::COLOR_BGR2GRAY
#define CV_GRAY2RGB cv::COLOR_GRAY2RGB
#define CV_YCrCb2RGB cv::COLOR_YCrCb2RGB
#define CV_YCrCb2BGR cv::COLOR_YCrCb2BGR
#define CV_BGR2YCrCb cv::COLOR_BGR2YCrCb
#define CV_INTER_CUBIC cv::INTER_CUBIC
#endif
#include "ext/base64/base64.h"
#include "utils/apitools.h"
#include <random>
namespace dd
{
class DDImg
{
public:
DDImg()
{
}
~DDImg()
{
}
// base64 detection
bool is_within_base64_range(char c) const
{
if ((c >= 'A' && c <= 'Z') || (c >= 'a' && c <= 'z')
|| (c >= '0' && c <= '9') || (c == '+' || c == '/' || c == '='))
return true;
else
return false;
}
bool possibly_base64(const std::string &s) const
{
bool ism = is_multiple_four(s);
if (!ism)
return false;
for (char c : s)
{
bool within_64 = is_within_base64_range(c);
if (!within_64)
return false;
}
return true;
}
bool is_multiple_four(const std::string &s) const
{
if (s.length() % 4 == 0)
return true;
else
return false;
}
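    // Heuristic only: any string over the base64 alphabet whose length is a
    // multiple of four passes, so plain alphanumeric inputs can false-positive.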
void resize(const cv::Mat &src, cv::Mat &dst, const cv::Size &cvsize,
const double &fx, const double &fy) const
{
#ifdef USE_CUDA_CV
if (_cuda)
{
cv::cuda::GpuMat d_src;
d_src.upload(src);
cv::cuda::GpuMat d_dst;
cv::cuda::resize(d_src, d_dst, cvsize, fx, fy, select_cv_interp());
if (_histogram_equalization)
{
if (_bw)
{
cv::cuda::equalizeHist(d_dst, d_dst);
if (_rgb)
cv::cuda::cvtColor(d_dst, d_dst, CV_GRAY2RGB);
}
else
{
                  // We don't apply equalizeHist to each BGR channel separately,
                  // to keep the color balance of the image. Equalizing the V
                  // channel of HSV works too; the result is almost the same.
cv::cuda::cvtColor(d_dst, d_dst, CV_BGR2YCrCb);
std::vector<cv::cuda::GpuMat> vec_channels;
cv::cuda::split(d_dst, vec_channels);
cv::cuda::equalizeHist(vec_channels[0], vec_channels[0]);
cv::cuda::merge(vec_channels, d_dst);
if (_rgb)
cv::cuda::cvtColor(d_dst, d_dst, CV_YCrCb2RGB);
else
cv::cuda::cvtColor(d_dst, d_dst, CV_YCrCb2BGR);
}
}
else if (_rgb)
{
if (_bw)
cv::cuda::cvtColor(d_dst, d_dst, CV_GRAY2RGB);
                else
cv::cuda::cvtColor(d_dst, d_dst, CV_BGR2RGB);
}
d_dst.download(dst);
}
else
#endif
{
cv::resize(src, dst, cvsize, fx, fy, select_cv_interp());
if (_histogram_equalization)
{
if (_bw)
{
cv::equalizeHist(dst, dst);
if (_rgb)
cv::cvtColor(dst, dst, CV_GRAY2RGB);
}
else
{
                // We don't apply equalizeHist to each BGR channel separately,
                // to keep the color balance of the image. Equalizing the V
                // channel of HSV works too; the result is almost the same.
cv::cvtColor(dst, dst, CV_BGR2YCrCb);
std::vector<cv::Mat> vec_channels;
cv::split(dst, vec_channels);
cv::equalizeHist(vec_channels[0], vec_channels[0]);
cv::merge(vec_channels, dst);
if (_rgb)
cv::cvtColor(dst, dst, CV_YCrCb2RGB);
else
cv::cvtColor(dst, dst, CV_YCrCb2BGR);
}
}
else if (_rgb)
{
if (_bw)
cv::cvtColor(dst, dst, CV_GRAY2RGB);
else
cv::cvtColor(dst, dst, CV_BGR2RGB);
}
}
}
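    // Rescale so that the shorter side approaches _scale_min without letting
    // the longer side exceed _scale_max (hence the min of the two ratios),
    // preserving the aspect ratio.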
void scale(const cv::Mat &src, cv::Mat &dst) const
{
float coef = std::min(
static_cast<float>(_scale_max) / std::max(src.rows, src.cols),
static_cast<float>(_scale_min) / std::min(src.rows, src.cols));
resize(src, dst, cv::Size(), coef, coef);
}
/// Apply preprocessing to image and add it to the list of images
/// img_name: name of the image as displayed in error messages
int add_image(const cv::Mat &img, const std::string &img_name)
{
if (_keep_orig)
_orig_imgs.push_back(img);
if (img.empty())
{
_logger->error("empty image {}", img_name);
return -1;
}
_imgs_size.push_back(std::pair<int, int>(img.rows, img.cols));
cv::Mat rimg;
try
{
if (_scaled)
scale(img, rimg);
else if (_width == 0 || _height == 0)
{
if (_width == 0 && _height == 0)
{
// Do nothing and keep native resolution. May cause issues if
// batched images are different resolutions
rimg = img;
}
else
{
// Resize so that the larger dimension is set to whichever
// (width or height) is non-zero, maintaining aspect ratio
// XXX - This may cause issues if batch images are different
// resolutions
size_t currMaxDim = std::max(img.rows, img.cols);
double scale = static_cast<double>(std::max(_width, _height))
/ static_cast<double>(currMaxDim);
resize(img, rimg, cv::Size(), scale, scale);
}
}
else
{
// Resize normally to the specified width and height
resize(img, rimg, cv::Size(_width, _height), 0, 0);
}
}
catch (...)
{
throw InputConnectorBadParamException("failed resizing image "
+ img_name);
}
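      // Center crop: the borders are half the difference between the resized
      // dimensions and the requested crop dimensions.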
if (_crop_width != 0 && _crop_height != 0)
{
int widthBorder = (_width - _crop_width) / 2;
int heightBorder = (_height - _crop_height) / 2;
try
{
rimg = rimg(cv::Rect(widthBorder, heightBorder, _crop_width,
_crop_height));
}
catch (...)
{
throw InputConnectorBadParamException("failed cropping image "
+ img_name);
}
}
_imgs.push_back(std::move(rimg));
return 0;
}
// decode image
void decode(const std::string &str)
{
std::vector<unsigned char> vdat(str.begin(), str.end());
cv::Mat img = cv::Mat(cv::imdecode(
cv::Mat(vdat, false),
_unchanged_data
? CV_LOAD_IMAGE_UNCHANGED
: (_bw ? CV_LOAD_IMAGE_GRAYSCALE : CV_LOAD_IMAGE_COLOR)));
add_image(img, "base64 image");
}
// deserialize image, independent of format
void deserialize(std::stringstream &input)
{
size_t size = 0;
input.seekg(0, input.end);
size = input.tellg();
input.seekg(0, input.beg);
char *data = new char[size];
input.read(data, size);
std::string str(data, data + size);
delete[] data;
decode(str);
}
// data acquisition
int read_file(const std::string &fname)
{
cv::Mat img
= cv::imread(fname, _unchanged_data ? CV_LOAD_IMAGE_UNCHANGED
: (_bw ? CV_LOAD_IMAGE_GRAYSCALE
: CV_LOAD_IMAGE_COLOR));
return add_image(img, fname);
}
int read_db(const std::string &fname)
{
_db_fname = fname;
return 0;
}
int read_mem(const std::string &content)
{
_in_mem = true;
cv::Mat timg;
_b64 = possibly_base64(content);
if (_b64)
{
std::string ccontent;
Base64::Decode(content, &ccontent);
std::stringstream sstr;
sstr << ccontent;
deserialize(sstr);
}
else
{
decode(content);
}
if (_imgs.at(0).empty())
return -1;
return 0;
}
int read_dir(const std::string &dir)
{
// list directories in dir
std::unordered_set<std::string> subdirs;
if (fileops::list_directory(dir, false, true, false, subdirs))
throw InputConnectorBadParamException(
"failed reading text subdirectories in data directory " + dir);
_logger->info("imginputfileconn: list subdirs size={}", subdirs.size());
// list files and classes
std::vector<std::pair<std::string, int>> lfiles; // labeled files
std::unordered_map<int, std::string>
hcorresp; // correspondence class number / class name
if (!subdirs.empty())
{
int cl = 0;
auto uit = subdirs.begin();
while (uit != subdirs.end())
{
std::unordered_set<std::string> subdir_files;
if (fileops::list_directory((*uit), true, false, true,
subdir_files))
throw InputConnectorBadParamException(
"failed reading image data sub-directory " + (*uit));
auto fit = subdir_files.begin();
while (fit != subdir_files.end()) // XXX: re-iterating the file
// is not optimal
{
lfiles.push_back(std::pair<std::string, int>((*fit), cl));
++fit;
}
++cl;
++uit;
}
}
else
{
std::unordered_set<std::string> test_files;
fileops::list_directory(dir, true, false, false, test_files);
auto fit = test_files.begin();
while (fit != test_files.end())
{
lfiles.push_back(
std::pair<std::string, int>((*fit), -1)); // -1 for no class
++fit;
}
}
// read images
_imgs.reserve(lfiles.size());
_img_files.reserve(lfiles.size());
_labels.reserve(lfiles.size());
for (std::pair<std::string, int> &p : lfiles)
{
cv::Mat img = cv::imread(
p.first, _unchanged_data ? CV_LOAD_IMAGE_UNCHANGED
: (_bw ? CV_LOAD_IMAGE_GRAYSCALE
: CV_LOAD_IMAGE_COLOR));
add_image(img, p.first);
_img_files.push_back(p.first);
if (p.second >= 0)
_labels.push_back(p.second);
if (_imgs.size() % 1000 == 0)
_logger->info("read {} images", _imgs.size());
}
return 0;
}
int select_cv_interp() const
{
if (_interp == "nearest")
return cv::INTER_NEAREST;
else if (_interp == "linear")
return cv::INTER_LINEAR;
else if (_interp == "area")
return cv::INTER_AREA;
else if (_interp == "lanczos4")
return cv::INTER_LANCZOS4;
else /* if (_interp == "cubic") */
return cv::INTER_CUBIC; // default
}
std::vector<cv::Mat> _imgs;
std::vector<cv::Mat> _orig_imgs;
std::vector<std::string> _img_files;
std::vector<std::pair<int, int>> _imgs_size;
bool _bw = false;
bool _rgb = false;
bool _histogram_equalization = false;
bool _in_mem = false;
bool _unchanged_data = false;
std::vector<int> _labels;
int _width = 224;
int _height = 224;
int _crop_width = 0;
int _crop_height = 0;
float _scale = 1.0;
bool _scaled = false;
int _scale_min = 600;
int _scale_max = 1000;
bool _keep_orig = false;
bool _b64 = false;
std::string _interp = "cubic";
#ifdef USE_CUDA_CV
bool _cuda = false;
#endif
std::string _db_fname;
std::shared_ptr<spdlog::logger> _logger;
};
class ImgInputFileConn : public InputConnectorStrategy
{
public:
ImgInputFileConn() : InputConnectorStrategy()
{
}
ImgInputFileConn(const ImgInputFileConn &i)
: InputConnectorStrategy(i), _width(i._width), _height(i._height),
_crop_width(i._crop_width), _crop_height(i._crop_height), _bw(i._bw),
          _rgb(i._rgb), _histogram_equalization(i._histogram_equalization),
          _unchanged_data(i._unchanged_data),
          _test_split(i._test_split), _mean(i._mean), _std(i._std),
          _has_mean_scalar(i._has_mean_scalar), _scale(i._scale),
_scaled(i._scaled), _scale_min(i._scale_min),
_scale_max(i._scale_max), _keep_orig(i._keep_orig),
_interp(i._interp)
#ifdef USE_CUDA_CV
,
_cuda(i._cuda)
#endif
{
}
~ImgInputFileConn()
{
}
void init(const APIData &ad)
{
fillup_parameters(ad);
}
void fillup_parameters(const APIData &ad)
{
// optional parameters.
if (ad.has("width"))
_width = ad.get("width").get<int>();
if (ad.has("height"))
_height = ad.get("height").get<int>();
if (ad.has("crop_width"))
{
_crop_width = ad.get("crop_width").get<int>();
if (_crop_width > _width)
{
_logger->error("Crop width must be less than or equal to width");
throw InputConnectorBadParamException(
"Crop width must be less than or equal to width");
}
}
if (ad.has("crop_height"))
{
_crop_height = ad.get("crop_height").get<int>();
if (_crop_height > _height)
{
_logger->error(
"Crop height must be less than or equal to height");
throw InputConnectorBadParamException(
"Crop height must be less than or equal to height");
}
}
if (ad.has("bw"))
_bw = ad.get("bw").get<bool>();
if (ad.has("rgb"))
_rgb = ad.get("rgb").get<bool>();
if (ad.has("histogram_equalization"))
_histogram_equalization = ad.get("histogram_equalization").get<bool>();
if (ad.has("unchanged_data"))
_unchanged_data = ad.get("unchanged_data").get<bool>();
if (ad.has("shuffle"))
_shuffle = ad.get("shuffle").get<bool>();
if (ad.has("seed"))
_seed = ad.get("seed").get<int>();
if (ad.has("test_split"))
_test_split = ad.get("test_split").get<double>();
if (ad.has("mean"))
{
apitools::get_floats(ad, "mean", _mean);
_has_mean_scalar = true;
}
if (ad.has("std"))
{
apitools::get_floats(ad, "std", _std);
}
// Variable size
if (ad.has("scale"))
_scale = ad.get("scale").get<double>();
if (ad.has("scaled") || ad.has("scale_min") || ad.has("scale_max"))
_scaled = true;
if (ad.has("scale_min"))
_scale_min = ad.get("scale_min").get<int>();
if (ad.has("scale_max"))
_scale_max = ad.get("scale_max").get<int>();
// whether to keep original image (for chained ops, e.g. cropping)
if (ad.has("keep_orig"))
_keep_orig = ad.get("keep_orig").get<bool>();
// image interpolation method
if (ad.has("interp"))
_interp = ad.get("interp").get<std::string>();
// timeout
this->set_timeout(ad);
#ifdef USE_CUDA_CV
// image resizing on GPU
if (ad.has("cuda"))
_cuda = ad.get("cuda").get<bool>();
#endif
}
void copy_parameters_to(DDImg &dimg) const
{
dimg._bw = _bw;
dimg._rgb = _rgb;
dimg._histogram_equalization = _histogram_equalization;
dimg._unchanged_data = _unchanged_data;
dimg._width = _width;
dimg._height = _height;
dimg._crop_width = _crop_width;
dimg._crop_height = _crop_height;
dimg._scale = _scale;
dimg._scaled = _scaled;
dimg._scale_min = _scale_min;
dimg._scale_max = _scale_max;
dimg._keep_orig = _keep_orig;
dimg._interp = _interp;
#ifdef USE_CUDA_CV
dimg._cuda = _cuda;
#endif
dimg._logger = _logger;
}
int feature_size() const
{
if (_bw || _unchanged_data)
{
// XXX: only valid for single channels
if (_crop_width != 0 && _crop_height != 0)
return _crop_width * _crop_height;
else
return _width * _height;
}
else
{
// RGB
if (_crop_width != 0 && _crop_height != 0)
return _crop_width * _crop_height * 3;
else
return _width * _height * 3;
}
}
int batch_size() const
{
return _images.size();
}
int test_batch_size() const
{
return _test_images.size();
}
void get_data(const APIData &ad)
{
// check for raw cv::Mat
if (ad.has("data_raw_img"))
{
if (ad.has("ids"))
_ids = ad.get("ids").get<std::vector<std::string>>();
if (ad.has("meta_uris"))
_meta_uris = ad.get("meta_uris").get<std::vector<std::string>>();
if (ad.has("index_uris"))
_index_uris = ad.get("index_uris").get<std::vector<std::string>>();
_images = ad.get("data_raw_img").get<std::vector<cv::Mat>>();
std::vector<cv::Mat> rimgs;
std::vector<std::string> uris;
int i = 0;
for (auto img : _images)
{
cv::Mat rimg;
resize(img, rimg, cv::Size(_width, _height), 0, 0);
if (_bw && rimg.channels() > 1)
{
cv::Mat bwimg;
cv::cvtColor(rimg, bwimg, CV_BGR2GRAY);
rimg = bwimg;
}
_images_size.push_back(std::pair<int, int>(img.rows, img.cols));
if (_keep_orig)
_orig_images.push_back(std::move(img));
if (!_ids.empty())
uris.push_back(_ids.at(i));
else
{
_ids.push_back(std::to_string(i));
uris.push_back(_ids.back());
}
rimgs.push_back(std::move(rimg));
++i;
}
_images = rimgs;
if (!uris.empty())
_uris = uris;
}
else
InputConnectorStrategy::get_data(ad);
}
void transform(const APIData &ad)
{
if (ad.has(
"parameters")) // hotplug of parameters, overriding the defaults
{
APIData ad_param = ad.getobj("parameters");
if (ad_param.has("input"))
{
fillup_parameters(ad_param.getobj("input"));
}
}
get_data(ad);
if (!_images.empty()) // got ready raw images
{
return;
}
int catch_read = 0;
std::string catch_msg;
std::vector<std::string> uris;
std::vector<std::string> meta_uris;
std::vector<std::string> index_uris;
std::vector<std::string> failed_uris;
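    // Images are decoded in parallel; all mutations of the shared containers
    // below are guarded by the omp critical sections.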
#pragma omp parallel for
for (size_t i = 0; i < _uris.size(); i++)
{
bool no_img = false;
std::string u = _uris.at(i);
DataEl<DDImg> dimg(this->_input_timeout);
copy_parameters_to(dimg._ctype);
try
{
if (dimg.read_element(u, this->_logger))
{
_logger->error("no data for image {}", u);
no_img = true;
}
if (!dimg._ctype._db_fname.empty())
_db_fname = dimg._ctype._db_fname;
}
catch (std::exception &e)
{
#pragma omp critical
{
++catch_read;
catch_msg = e.what();
failed_uris.push_back(u);
no_img = true;
}
}
if (no_img)
continue;
if (!_db_fname.empty())
continue;
#pragma omp critical
{
_images.insert(_images.end(),
std::make_move_iterator(dimg._ctype._imgs.begin()),
std::make_move_iterator(dimg._ctype._imgs.end()));
if (_keep_orig)
_orig_images.insert(
_orig_images.end(),
std::make_move_iterator(dimg._ctype._orig_imgs.begin()),
std::make_move_iterator(dimg._ctype._orig_imgs.end()));
_images_size.insert(
_images_size.end(),
std::make_move_iterator(dimg._ctype._imgs_size.begin()),
std::make_move_iterator(dimg._ctype._imgs_size.end()));
if (!dimg._ctype._labels.empty())
_test_labels.insert(
_test_labels.end(),
std::make_move_iterator(dimg._ctype._labels.begin()),
std::make_move_iterator(dimg._ctype._labels.end()));
if (!_ids.empty())
uris.push_back(_ids.at(i));
else if (!dimg._ctype._b64 && dimg._ctype._imgs.size() == 1)
uris.push_back(u);
else if (!dimg._ctype._img_files.empty())
uris.insert(
uris.end(),
std::make_move_iterator(dimg._ctype._img_files.begin()),
std::make_move_iterator(dimg._ctype._img_files.end()));
else
uris.push_back(std::to_string(i));
if (!_meta_uris.empty())
meta_uris.push_back(_meta_uris.at(i));
if (!_index_uris.empty())
index_uris.push_back(_index_uris.at(i));
}
}
if (catch_read)
{
for (auto s : failed_uris)
_logger->error("failed reading image {}", s);
throw InputConnectorBadParamException(catch_msg);
}
_uris = uris;
_ids = _uris; // since uris may be in different order than before
// transform
_meta_uris = meta_uris;
_index_uris = index_uris;
if (!_db_fname.empty())
return; // db filename is passed to backend
// shuffle before possible split
if (_shuffle)
{
std::mt19937 g;
if (_seed >= 0)
g = std::mt19937(_seed);
else
{
std::random_device rd;
g = std::mt19937(rd());
}
std::shuffle(_images.begin(), _images.end(),
g); // XXX beware: labels are not shuffled, i.e. let's
// not shuffle while testing
}
// split as required
if (_test_split > 0)
{
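        // Keep the first split_size images for training and move the tail to
        // _test_images; dchit marks the first element handed to the test set
        // so the tail can be erased from _images afterwards.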
int split_size = std::floor(_images.size() * (1.0 - _test_split));
auto chit = _images.begin();
auto dchit = chit;
int cpos = 0;
while (chit != _images.end())
{
if (cpos == split_size)
{
if (dchit == _images.begin())
dchit = chit;
_test_images.push_back((*chit));
}
else
++cpos;
++chit;
}
_images.erase(dchit, _images.end());
_logger->info("data split test size={} / remaining data size={}",
_test_images.size(), _images.size());
}
if (_images.empty())
throw InputConnectorBadParamException("no image could be found");
}
// data
std::vector<cv::Mat> _images;
std::vector<cv::Mat> _orig_images; /**< stored upon request. */
std::vector<cv::Mat> _test_images;
std::vector<int> _test_labels;
std::vector<std::pair<int, int>> _images_size;
// image parameters
int _width = 224;
int _height = 224;
int _crop_width = 0;
int _crop_height = 0;
bool _bw = false; /**< whether to convert to black & white. */
bool _rgb = false; /**< whether to convert to rgb. */
bool _histogram_equalization
= false; /**< whether to apply histogram equalizer. */
bool _unchanged_data = false; /**< IMREAD_UNCHANGED flag. */
double _test_split = 0.0; /**< auto-split of the dataset. */
int _seed = -1; /**< shuffling seed. */
std::vector<float>
_mean; /**< mean image pixels, to be subtracted from images. */
std::vector<float> _std; /**< std, to divide image values. */
bool _has_mean_scalar = false; /**< whether scalar is set. */
std::string _db_fname;
double _scale = 1.0;
bool _scaled = false;
int _scale_min = 600;
int _scale_max = 1000;
bool _keep_orig = false;
std::string _interp = "cubic";
#ifdef USE_CUDA_CV
bool _cuda = false;
#endif
};
}
#ifdef USE_CAFFE
#include "caffeinputconns.h"
#endif
#ifdef USE_TF
#include "backends/tf/tfinputconns.h"
#endif
#ifdef USE_DLIB
#include "backends/dlib/dlibinputconns.h"
#endif
#ifdef USE_NCNN
#include "backends/ncnn/ncnninputconns.h"
#endif
#ifdef USE_CAFFE2
#include "backends/caffe2/caffe2inputconns.h"
#endif
#ifdef USE_TENSORRT
#include "backends/tensorrt/tensorrtinputconns.h"
#endif
#ifdef USE_TORCH
#include "backends/torch/torchinputconns.h"
#endif
#endif
|
DRB104-nowait-barrier-orig-no.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
This example is based on one code snippet extracted from a paper:
Ma etc. Symbolic Analysis of Concurrency Errors in OpenMP Programs, ICPP 2013
Explicit barrier to counteract nowait
*/
#include <stdio.h>
#include <assert.h>
int main()
{
int i,error;
int len = 1000;
int a[len], b=5;
for (i=0; i<len; i++)
a[i]= i;
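  // The explicit barrier inside the parallel region makes the read of a[9]
  // by the single construct safe despite the nowait on the worksharing loop.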
#pragma omp parallel shared(b, error)
{
#pragma omp for nowait
for(i = 0; i < len; i++)
a[i] = b + a[i]*5;
#pragma omp barrier
#pragma omp single
error = a[9] + 1;
}
assert (error == 51);
printf ("error = %d\n", error);
return 0;
}
|
advection.c | #include "../../comms.h"
#include "../../shared.h"
#include "hale.h"
#include <float.h>
#include <math.h>
// Performs a remap and some scattering of the subcell values
void advection_phase(UnstructuredMesh* umesh, HaleData* hale_data) {
// Advects mass and energy through the subcell faces using swept edge approx
perform_advection(
umesh->ncells, umesh->cells_to_nodes_offsets, umesh->nodes_x0, umesh->nodes_y0,
umesh->nodes_z0, hale_data->rezoned_nodes_x, hale_data->rezoned_nodes_y,
hale_data->rezoned_nodes_z, umesh->cells_to_nodes,
umesh->faces_to_nodes_offsets, umesh->faces_to_nodes,
umesh->faces_cclockwise_cell, hale_data->subcells_to_faces_offsets,
hale_data->subcells_to_faces, hale_data->subcells_to_subcells_offsets,
hale_data->subcells_to_subcells, hale_data->subcell_centroids_x,
hale_data->subcell_centroids_y, hale_data->subcell_centroids_z,
umesh->faces_to_cells0, umesh->faces_to_cells1, hale_data->subcell_volume,
hale_data->subcell_momentum_flux_x, hale_data->subcell_momentum_flux_y,
hale_data->subcell_momentum_flux_z, hale_data->subcell_momentum_x,
hale_data->subcell_momentum_y, hale_data->subcell_momentum_z,
hale_data->subcell_mass, hale_data->subcell_mass_flux,
hale_data->subcell_ie_mass, hale_data->subcell_ie_mass_flux,
hale_data->subcell_ke_mass, hale_data->subcell_ke_mass_flux);
}
// Advects mass and energy through the subcell faces using swept edge approx
void perform_advection(
const int ncells, const int* cells_to_nodes_offsets, const double* nodes_x,
const double* nodes_y, const double* nodes_z, const double* rezoned_nodes_x,
const double* rezoned_nodes_y, const double* rezoned_nodes_z,
const int* cells_to_nodes, const int* faces_to_nodes_offsets,
const int* faces_to_nodes, const int* faces_cclockwise_cell,
const int* subcells_to_faces_offsets, const int* subcells_to_faces,
const int* subcells_to_subcells_offsets, const int* subcells_to_subcells,
const double* subcell_centroids_x, const double* subcell_centroids_y,
const double* subcell_centroids_z, const int* faces_to_cells0,
const int* faces_to_cells1, double* subcell_volume,
double* subcell_momentum_flux_x, double* subcell_momentum_flux_y,
double* subcell_momentum_flux_z, const double* subcell_momentum_x,
const double* subcell_momentum_y, const double* subcell_momentum_z,
const double* subcell_mass, double* subcell_mass_flux,
const double* subcell_ie_mass, double* subcell_ie_mass_flux,
const double* subcell_ke_mass, double* subcell_ke_mass_flux) {
#pragma omp parallel for
for (int cc = 0; cc < ncells; ++cc) {
const int cell_to_nodes_off = cells_to_nodes_offsets[(cc)];
const int nnodes_by_cell = cells_to_nodes_offsets[(cc + 1)] - cell_to_nodes_off;
vec_t cell_c = {0.0, 0.0, 0.0};
calc_centroid(nnodes_by_cell, nodes_x, nodes_y, nodes_z, cells_to_nodes,
cell_to_nodes_off, &cell_c);
vec_t rz_cell_c = {0.0, 0.0, 0.0};
calc_centroid(nnodes_by_cell, rezoned_nodes_x, rezoned_nodes_y,
rezoned_nodes_z, cells_to_nodes, cell_to_nodes_off,
&rz_cell_c);
// Looping over corner subcells here
for (int nn = 0; nn < nnodes_by_cell; ++nn) {
const int node_index = cells_to_nodes[(cell_to_nodes_off + nn)];
const int subcell_index = cell_to_nodes_off + nn;
const int subcell_to_faces_off =
subcells_to_faces_offsets[(subcell_index)];
const int nfaces_by_subcell =
subcells_to_faces_offsets[(subcell_index + 1)] - subcell_to_faces_off;
vec_t subcell_c = {subcell_centroids_x[(subcell_index)],
subcell_centroids_y[(subcell_index)],
subcell_centroids_z[(subcell_index)]};
// Consider all faces attached to node
for (int ff = 0; ff < nfaces_by_subcell; ++ff) {
const int face_index = subcells_to_faces[(subcell_to_faces_off + ff)];
const int face_to_nodes_off = faces_to_nodes_offsets[(face_index)];
const int nnodes_by_face =
faces_to_nodes_offsets[(face_index + 1)] - face_to_nodes_off;
const int neighbour_cc = (faces_to_cells0[(face_index)] == cc)
? faces_to_cells1[(face_index)]
: faces_to_cells0[(face_index)];
// The face centroid is the same for all nodes on the face
vec_t face_c = {0.0, 0.0, 0.0};
calc_centroid(nnodes_by_face, nodes_x, nodes_y, nodes_z, faces_to_nodes,
face_to_nodes_off, &face_c);
vec_t rz_face_c = {0.0, 0.0, 0.0};
calc_centroid(nnodes_by_face, rezoned_nodes_x, rezoned_nodes_y,
rezoned_nodes_z, faces_to_nodes, face_to_nodes_off,
&rz_face_c);
// Determine the position of the node in the face list of nodes
int nn2;
for (nn2 = 0; nn2 < nnodes_by_face; ++nn2) {
if (faces_to_nodes[(face_to_nodes_off + nn2)] == node_index) {
break;
}
}
const int face_clockwise = (faces_cclockwise_cell[(face_index)] != cc);
const int next_node = (nn2 == nnodes_by_face - 1) ? 0 : nn2 + 1;
const int prev_node = (nn2 == 0) ? nnodes_by_face - 1 : nn2 - 1;
const int rnode_off = (face_clockwise ? prev_node : next_node);
const int lnode_off = (face_clockwise ? next_node : prev_node);
const int rnode_index = faces_to_nodes[(face_to_nodes_off + rnode_off)];
const int lnode_index = faces_to_nodes[(face_to_nodes_off + lnode_off)];
const int swept_edge_to_faces[] = {0, 1, 2, 3, 4, 5};
const int swept_edge_faces_to_nodes[] = {0, 1, 2, 3, 4, 5, 6, 7,
0, 3, 7, 4, 7, 6, 2, 3,
1, 5, 6, 2, 0, 4, 5, 1};
const int swept_edge_faces_to_nodes_offsets[] = {0, 4, 8, 12,
16, 20, 24};
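        // The swept edge region is treated as a hexahedron: nodes 0-3 come
        // from the current geometry, nodes 4-7 from the rezoned geometry, and
        // the offsets above describe its six quadrilateral faces.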
/* INTERNAL FACE */
const int r_face_off = (ff == nfaces_by_subcell - 1) ? 0 : ff + 1;
const int lface_off = (ff == 0) ? nfaces_by_subcell - 1 : ff - 1;
const int r_face_index =
subcells_to_faces[(subcell_to_faces_off + r_face_off)];
const int lface_index =
subcells_to_faces[(subcell_to_faces_off + lface_off)];
const int r_face_to_nodes_off = faces_to_nodes_offsets[(r_face_index)];
const int lface_to_nodes_off = faces_to_nodes_offsets[(lface_index)];
const int nnodes_by_r_face =
faces_to_nodes_offsets[(r_face_index + 1)] - r_face_to_nodes_off;
const int nnodes_by_lface =
faces_to_nodes_offsets[(lface_index + 1)] - lface_to_nodes_off;
vec_t r_iface_c = {0.0, 0.0, 0.0};
calc_centroid(nnodes_by_r_face, nodes_x, nodes_y, nodes_z,
faces_to_nodes, r_face_to_nodes_off, &r_iface_c);
const int r_face_clockwise =
(faces_cclockwise_cell[(r_face_index)] != cc);
// Determine the position of the node in the face list of nodes
for (nn2 = 0; nn2 < nnodes_by_r_face; ++nn2) {
if (faces_to_nodes[(r_face_to_nodes_off + nn2)] == node_index) {
break;
}
}
const int r_face_next_node =
(nn2 == nnodes_by_r_face - 1) ? 0 : nn2 + 1;
const int r_face_prev_node =
(nn2 == 0) ? nnodes_by_r_face - 1 : nn2 - 1;
const int r_face_rnode_off =
(r_face_clockwise ? r_face_prev_node : r_face_next_node);
const int r_face_rnode_index =
faces_to_nodes[(r_face_to_nodes_off + r_face_rnode_off)];
vec_t l_iface_c = {0.0, 0.0, 0.0};
calc_centroid(nnodes_by_lface, nodes_x, nodes_y, nodes_z,
faces_to_nodes, lface_to_nodes_off, &l_iface_c);
vec_t rz_r_iface_c = {0.0, 0.0, 0.0};
calc_centroid(nnodes_by_r_face, rezoned_nodes_x, rezoned_nodes_y,
rezoned_nodes_z, faces_to_nodes, r_face_to_nodes_off,
&rz_r_iface_c);
vec_t rz_l_iface_c = {0.0, 0.0, 0.0};
calc_centroid(nnodes_by_lface, rezoned_nodes_x, rezoned_nodes_y,
rezoned_nodes_z, faces_to_nodes, lface_to_nodes_off,
&rz_l_iface_c);
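        // The internal swept face is spanned by the midpoint between the node
        // and its right neighbour on the right face, the right and left
        // interior face centroids and the cell centroid, plus their rezoned
        // counterparts.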
double inodes_x[2 * NNODES_BY_SUBCELL_FACE] = {
0.5 * (nodes_x[(node_index)] + nodes_x[(r_face_rnode_index)]),
r_iface_c.x, cell_c.x, l_iface_c.x,
0.5 * (rezoned_nodes_x[(node_index)] +
rezoned_nodes_x[(r_face_rnode_index)]),
rz_r_iface_c.x, rz_cell_c.x, rz_l_iface_c.x};
double inodes_y[2 * NNODES_BY_SUBCELL_FACE] = {
0.5 * (nodes_y[(node_index)] + nodes_y[(r_face_rnode_index)]),
r_iface_c.y, cell_c.y, l_iface_c.y,
0.5 * (rezoned_nodes_y[(node_index)] +
rezoned_nodes_y[(r_face_rnode_index)]),
rz_r_iface_c.y, rz_cell_c.y, rz_l_iface_c.y};
double inodes_z[2 * NNODES_BY_SUBCELL_FACE] = {
0.5 * (nodes_z[(node_index)] + nodes_z[(r_face_rnode_index)]),
r_iface_c.z, cell_c.z, l_iface_c.z,
0.5 * (rezoned_nodes_z[(node_index)] +
rezoned_nodes_z[(r_face_rnode_index)]),
rz_r_iface_c.z, rz_cell_c.z, rz_l_iface_c.z};
// Contributes the local mass, energy and momentum flux for a given
// subcell face
flux_mass_energy_momentum(
cc, neighbour_cc, ff, subcell_index, &subcell_c, &cell_c, inodes_x,
inodes_y, inodes_z, subcell_mass, subcell_mass_flux,
subcell_ie_mass, subcell_ie_mass_flux, subcell_ke_mass,
subcell_ke_mass_flux, subcell_volume, subcell_momentum_x,
subcell_momentum_y, subcell_momentum_z, subcell_momentum_flux_x,
subcell_momentum_flux_y, subcell_momentum_flux_z,
swept_edge_faces_to_nodes, subcell_centroids_x, subcell_centroids_y,
subcell_centroids_z, swept_edge_to_faces,
swept_edge_faces_to_nodes_offsets, subcells_to_subcells_offsets,
subcells_to_subcells, subcells_to_faces_offsets, subcells_to_faces,
faces_to_nodes_offsets, faces_to_nodes, cells_to_nodes_offsets,
cells_to_nodes, faces_cclockwise_cell, nodes_x, nodes_y, nodes_z,
1);
/* EXTERNAL FACE */
        // We explicitly disallow flux on the boundary; this could be disabled
        // for testing purposes in order to ensure that no flux is inadvertently
        // accumulating on the boundaries.
if (neighbour_cc == -1) {
continue;
}
double enodes_x[2 * NNODES_BY_SUBCELL_FACE] = {
nodes_x[(node_index)],
0.5 * (nodes_x[(node_index)] + nodes_x[(rnode_index)]), face_c.x,
0.5 * (nodes_x[(node_index)] + nodes_x[(lnode_index)]),
rezoned_nodes_x[(node_index)],
0.5 * (rezoned_nodes_x[(node_index)] +
rezoned_nodes_x[(rnode_index)]),
rz_face_c.x, 0.5 * (rezoned_nodes_x[(node_index)] +
rezoned_nodes_x[(lnode_index)])};
double enodes_y[2 * NNODES_BY_SUBCELL_FACE] = {
nodes_y[(node_index)],
0.5 * (nodes_y[(node_index)] + nodes_y[(rnode_index)]), face_c.y,
0.5 * (nodes_y[(node_index)] + nodes_y[(lnode_index)]),
rezoned_nodes_y[(node_index)],
0.5 * (rezoned_nodes_y[(node_index)] +
rezoned_nodes_y[(rnode_index)]),
rz_face_c.y, 0.5 * (rezoned_nodes_y[(node_index)] +
rezoned_nodes_y[(lnode_index)])};
double enodes_z[2 * NNODES_BY_SUBCELL_FACE] = {
nodes_z[(node_index)],
0.5 * (nodes_z[(node_index)] + nodes_z[(rnode_index)]), face_c.z,
0.5 * (nodes_z[(node_index)] + nodes_z[(lnode_index)]),
rezoned_nodes_z[(node_index)],
0.5 * (rezoned_nodes_z[(node_index)] +
rezoned_nodes_z[(rnode_index)]),
rz_face_c.z, 0.5 * (rezoned_nodes_z[(node_index)] +
rezoned_nodes_z[(lnode_index)])};
// Contributes the local mass, energy and momentum flux for a given
// subcell face
flux_mass_energy_momentum(
cc, neighbour_cc, ff, subcell_index, &subcell_c, &cell_c, enodes_x,
enodes_y, enodes_z, subcell_mass, subcell_mass_flux,
subcell_ie_mass, subcell_ie_mass_flux, subcell_ke_mass,
subcell_ke_mass_flux, subcell_volume, subcell_momentum_x,
subcell_momentum_y, subcell_momentum_z, subcell_momentum_flux_x,
subcell_momentum_flux_y, subcell_momentum_flux_z,
swept_edge_faces_to_nodes, subcell_centroids_x, subcell_centroids_y,
subcell_centroids_z, swept_edge_to_faces,
swept_edge_faces_to_nodes_offsets, subcells_to_subcells_offsets,
subcells_to_subcells, subcells_to_faces_offsets, subcells_to_faces,
faces_to_nodes_offsets, faces_to_nodes, cells_to_nodes_offsets,
cells_to_nodes, faces_cclockwise_cell, nodes_x, nodes_y, nodes_z,
0);
}
}
}
}
// Contributes the local mass, energy and momentum flux for a given subcell face
void flux_mass_energy_momentum(
const int cc, const int neighbour_cc, const int ff, const int subcell_index,
vec_t* subcell_c, vec_t* cell_c, const double* se_nodes_x,
const double* se_nodes_y, const double* se_nodes_z,
const double* subcell_mass, double* subcell_mass_flux,
const double* subcell_ie_mass, double* subcell_ie_mass_flux,
const double* subcell_ke_mass, double* subcell_ke_mass_flux,
const double* subcell_volume, const double* subcell_momentum_x,
const double* subcell_momentum_y, const double* subcell_momentum_z,
double* subcell_momentum_flux_x, double* subcell_momentum_flux_y,
double* subcell_momentum_flux_z, const int* swept_edge_faces_to_nodes,
const double* subcell_centroids_x, const double* subcell_centroids_y,
const double* subcell_centroids_z, const int* swept_edge_to_faces,
const int* swept_edge_faces_to_nodes_offsets,
const int* subcells_to_subcells_offsets, const int* subcells_to_subcells,
const int* subcells_to_faces_offsets, const int* subcells_to_faces,
const int* faces_to_nodes_offsets, const int* faces_to_nodes,
const int* cells_to_nodes_offsets, const int* cells_to_nodes,
const int* faces_cclockwise_cell, const double* nodes_x,
const double* nodes_y, const double* nodes_z, const int internal) {
// Get the centroids for the swept edge prism and faces
vec_t face_c = {0.0, 0.0, 0.0};
vec_t rz_face_c = {0.0, 0.0, 0.0};
vec_t swept_edge_c = {0.0, 0.0, 0.0};
calc_centroid(NNODES_BY_SUBCELL_FACE, se_nodes_x, se_nodes_y, se_nodes_z,
swept_edge_faces_to_nodes, 0, &face_c);
calc_centroid(NNODES_BY_SUBCELL_FACE, se_nodes_x, se_nodes_y, se_nodes_z,
swept_edge_faces_to_nodes,
swept_edge_faces_to_nodes_offsets[(1)], &rz_face_c);
calc_centroid(2 * NNODES_BY_SUBCELL_FACE, se_nodes_x, se_nodes_y, se_nodes_z,
swept_edge_faces_to_nodes, 0, &swept_edge_c);
// Calculate the volume of the swept edge prism
double swept_edge_vol = 0.0;
calc_volume(0, 2 + NNODES_BY_SUBCELL_FACE, swept_edge_to_faces,
swept_edge_faces_to_nodes, swept_edge_faces_to_nodes_offsets,
se_nodes_x, se_nodes_y, se_nodes_z, &swept_edge_c,
&swept_edge_vol);
// Ignore the special case of an empty swept edge region
if (swept_edge_vol < EPS) {
if (swept_edge_vol < -EPS) {
printf("Negative swept edge volume %d %.12f\n", cc, swept_edge_vol);
}
return;
}
  // Determine whether the swept region fluxes out of the current subcell:
  // ab is the displacement of the face centroid under rezoning and ac points
  // from the face centroid to the subcell centroid, so a positive dot product
  // means the face sweeps towards the subcell and material leaves it.
vec_t ab = {rz_face_c.x - face_c.x, rz_face_c.y - face_c.y,
rz_face_c.z - face_c.z};
vec_t ac = {subcell_c->x - face_c.x, subcell_c->y - face_c.y,
subcell_c->z - face_c.z};
const int is_outflux = (ab.x * ac.x + ab.y * ac.y + ab.z * ac.z > 0.0);
// Depending upon which subcell we are sweeping into, choose the
// subcell index with which to reconstruct the density
const int subcell_to_subcells_off =
subcells_to_subcells_offsets[(subcell_index)];
const int internal_offset = (internal ? 0 : 1);
const int subcell_neighbour_index = subcells_to_subcells[(
subcell_to_subcells_off + 2 * ff + internal_offset)];
// Only perform the sweep on the external face if it isn't a
// boundary
if (subcell_neighbour_index == -1) {
TERMINATE(
"We should not be attempting to flux from boundary. Volume: %.12f.",
swept_edge_vol);
}
// The sweep subcell index is where we will reconstruct the value of the
// swept edge region from
const int sweep_subcell_index =
(is_outflux ? subcell_index : subcell_neighbour_index);
/* CALCULATE THE SWEEP SUBCELL GRADIENTS FOR MASS AND ENERGY */
vec_t inv[3] = {{0.0, 0.0, 0.0}};
vec_t coeff[3] = {{0.0, 0.0, 0.0}};
vec_t m_rhs = {0.0, 0.0, 0.0};
vec_t ie_rhs = {0.0, 0.0, 0.0};
vec_t ke_rhs = {0.0, 0.0, 0.0};
vec_t vx_rhs = {0.0, 0.0, 0.0};
vec_t vy_rhs = {0.0, 0.0, 0.0};
vec_t vz_rhs = {0.0, 0.0, 0.0};
double gmax_m = -DBL_MAX;
double gmin_m = DBL_MAX;
double gmax_ie = -DBL_MAX;
double gmin_ie = DBL_MAX;
double gmax_ke = -DBL_MAX;
double gmin_ke = DBL_MAX;
double gmax_vx = -DBL_MAX;
double gmin_vx = DBL_MAX;
double gmax_vy = -DBL_MAX;
double gmin_vy = DBL_MAX;
double gmax_vz = -DBL_MAX;
double gmin_vz = DBL_MAX;
vec_t sweep_subcell_c = {subcell_centroids_x[(sweep_subcell_index)],
subcell_centroids_y[(sweep_subcell_index)],
subcell_centroids_z[(sweep_subcell_index)]};
const double sweep_subcell_vol = subcell_volume[(sweep_subcell_index)];
const double sweep_subcell_density =
subcell_mass[(sweep_subcell_index)] / sweep_subcell_vol;
const double sweep_subcell_ie_density =
subcell_ie_mass[(sweep_subcell_index)] / sweep_subcell_vol;
const double sweep_subcell_ke_density =
subcell_ke_mass[(sweep_subcell_index)] / sweep_subcell_vol;
vec_t subcell_v = {
subcell_momentum_x[(sweep_subcell_index)] / sweep_subcell_vol,
subcell_momentum_y[(sweep_subcell_index)] / sweep_subcell_vol,
subcell_momentum_z[(sweep_subcell_index)] / sweep_subcell_vol};
const int sweep_subcell_to_subcells_off =
subcells_to_subcells_offsets[(sweep_subcell_index)];
const int nsubcell_neighbours =
subcells_to_subcells_offsets[(sweep_subcell_index + 1)] -
sweep_subcell_to_subcells_off;
for (int ss = 0; ss < nsubcell_neighbours; ++ss) {
const int sweep_neighbour_index =
subcells_to_subcells[(sweep_subcell_to_subcells_off + ss)];
// Ignore boundary neighbours
if (sweep_neighbour_index == -1) {
continue;
}
const double neighbour_vol = subcell_volume[(sweep_neighbour_index)];
vec_t i = {
(subcell_centroids_x[(sweep_neighbour_index)] - sweep_subcell_c.x) *
neighbour_vol,
(subcell_centroids_y[(sweep_neighbour_index)] - sweep_subcell_c.y) *
neighbour_vol,
(subcell_centroids_z[(sweep_neighbour_index)] - sweep_subcell_c.z) *
neighbour_vol};
// Store the neighbouring cell's contribution to the coefficients
coeff[0].x += 2.0 * (i.x * i.x) / (neighbour_vol * neighbour_vol);
coeff[0].y += 2.0 * (i.x * i.y) / (neighbour_vol * neighbour_vol);
coeff[0].z += 2.0 * (i.x * i.z) / (neighbour_vol * neighbour_vol);
coeff[1].x += 2.0 * (i.y * i.x) / (neighbour_vol * neighbour_vol);
coeff[1].y += 2.0 * (i.y * i.y) / (neighbour_vol * neighbour_vol);
coeff[1].z += 2.0 * (i.y * i.z) / (neighbour_vol * neighbour_vol);
coeff[2].x += 2.0 * (i.z * i.x) / (neighbour_vol * neighbour_vol);
coeff[2].y += 2.0 * (i.z * i.y) / (neighbour_vol * neighbour_vol);
coeff[2].z += 2.0 * (i.z * i.z) / (neighbour_vol * neighbour_vol);
// Get subcell quantities of neighbouring subcell
const double neighbour_m_density =
subcell_mass[(sweep_neighbour_index)] / neighbour_vol;
const double neighbour_ie_density =
subcell_ie_mass[(sweep_neighbour_index)] / neighbour_vol;
const double neighbour_ke_density =
subcell_ke_mass[(sweep_neighbour_index)] / neighbour_vol;
vec_t neighbour_v = {
subcell_momentum_x[(sweep_neighbour_index)] / neighbour_vol,
subcell_momentum_y[(sweep_neighbour_index)] / neighbour_vol,
subcell_momentum_z[(sweep_neighbour_index)] / neighbour_vol};
// Determine differentials for subcell quantities
const double dneighbour_m_density =
neighbour_m_density - sweep_subcell_density;
const double dneighbour_ie_density =
neighbour_ie_density - sweep_subcell_ie_density;
const double dneighbour_ke_density =
neighbour_ke_density - sweep_subcell_ke_density;
const double dneighbour_vx = (neighbour_v.x - subcell_v.x);
const double dneighbour_vy = (neighbour_v.y - subcell_v.y);
const double dneighbour_vz = (neighbour_v.z - subcell_v.z);
// Calculate the RHS for each gradient calculation
m_rhs.x += 2.0 * dneighbour_m_density * i.x / neighbour_vol;
m_rhs.y += 2.0 * dneighbour_m_density * i.y / neighbour_vol;
m_rhs.z += 2.0 * dneighbour_m_density * i.z / neighbour_vol;
ie_rhs.x += 2.0 * dneighbour_ie_density * i.x / neighbour_vol;
ie_rhs.y += 2.0 * dneighbour_ie_density * i.y / neighbour_vol;
ie_rhs.z += 2.0 * dneighbour_ie_density * i.z / neighbour_vol;
ke_rhs.x += 2.0 * dneighbour_ke_density * i.x / neighbour_vol;
ke_rhs.y += 2.0 * dneighbour_ke_density * i.y / neighbour_vol;
ke_rhs.z += 2.0 * dneighbour_ke_density * i.z / neighbour_vol;
vx_rhs.x += 2.0 * dneighbour_vx * i.x / neighbour_vol;
vx_rhs.y += 2.0 * dneighbour_vx * i.y / neighbour_vol;
vx_rhs.z += 2.0 * dneighbour_vx * i.z / neighbour_vol;
vy_rhs.x += 2.0 * dneighbour_vy * i.x / neighbour_vol;
vy_rhs.y += 2.0 * dneighbour_vy * i.y / neighbour_vol;
vy_rhs.z += 2.0 * dneighbour_vy * i.z / neighbour_vol;
vz_rhs.x += 2.0 * dneighbour_vz * i.x / neighbour_vol;
vz_rhs.y += 2.0 * dneighbour_vz * i.y / neighbour_vol;
vz_rhs.z += 2.0 * dneighbour_vz * i.z / neighbour_vol;
// Store the maximum / minimum values for rho in the neighbourhood
gmax_m = max(gmax_m, neighbour_m_density);
gmin_m = min(gmin_m, neighbour_m_density);
gmax_ie = max(gmax_ie, neighbour_ie_density);
gmin_ie = min(gmin_ie, neighbour_ie_density);
gmax_ke = max(gmax_ke, neighbour_ke_density);
gmin_ke = min(gmin_ke, neighbour_ke_density);
gmax_vx = max(gmax_vx, neighbour_v.x);
gmin_vx = min(gmin_vx, neighbour_v.x);
gmax_vy = max(gmax_vy, neighbour_v.y);
gmin_vy = min(gmin_vy, neighbour_v.y);
gmax_vz = max(gmax_vz, neighbour_v.z);
gmin_vz = min(gmin_vz, neighbour_v.z);
}
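  // coeff and the *_rhs vectors assemble the normal equations of a
  // least-squares fit dq ~= grad . dx over the subcell neighbours (the
  // neighbour_vol factors cancel); the 3x3 system is solved by explicit
  // inversion below.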
calc_3x3_inverse(&coeff, &inv);
// Calculate the gradients
vec_t grad_m = {inv[0].x * m_rhs.x + inv[0].y * m_rhs.y + inv[0].z * m_rhs.z,
inv[1].x * m_rhs.x + inv[1].y * m_rhs.y + inv[1].z * m_rhs.z,
inv[2].x * m_rhs.x + inv[2].y * m_rhs.y + inv[2].z * m_rhs.z};
vec_t grad_ie = {
inv[0].x * ie_rhs.x + inv[0].y * ie_rhs.y + inv[0].z * ie_rhs.z,
inv[1].x * ie_rhs.x + inv[1].y * ie_rhs.y + inv[1].z * ie_rhs.z,
inv[2].x * ie_rhs.x + inv[2].y * ie_rhs.y + inv[2].z * ie_rhs.z};
vec_t grad_ke = {
inv[0].x * ke_rhs.x + inv[0].y * ke_rhs.y + inv[0].z * ke_rhs.z,
inv[1].x * ke_rhs.x + inv[1].y * ke_rhs.y + inv[1].z * ke_rhs.z,
inv[2].x * ke_rhs.x + inv[2].y * ke_rhs.y + inv[2].z * ke_rhs.z};
vec_t grad_vx = {
inv[0].x * vx_rhs.x + inv[0].y * vx_rhs.y + inv[0].z * vx_rhs.z,
inv[1].x * vx_rhs.x + inv[1].y * vx_rhs.y + inv[1].z * vx_rhs.z,
inv[2].x * vx_rhs.x + inv[2].y * vx_rhs.y + inv[2].z * vx_rhs.z};
vec_t grad_vy = {
inv[0].x * vy_rhs.x + inv[0].y * vy_rhs.y + inv[0].z * vy_rhs.z,
inv[1].x * vy_rhs.x + inv[1].y * vy_rhs.y + inv[1].z * vy_rhs.z,
inv[2].x * vy_rhs.x + inv[2].y * vy_rhs.y + inv[2].z * vy_rhs.z};
vec_t grad_vz = {
inv[0].x * vz_rhs.x + inv[0].y * vz_rhs.y + inv[0].z * vz_rhs.z,
inv[1].x * vz_rhs.x + inv[1].y * vz_rhs.y + inv[1].z * vz_rhs.z,
inv[2].x * vz_rhs.x + inv[2].y * vz_rhs.y + inv[2].z * vz_rhs.z};
/* LIMIT THE GRADIENT */
// Performing the limiting actually requires the sweep subcell's nodes
double m_limiter = 1.0;
double ie_limiter = 1.0;
double ke_limiter = 1.0;
double vx_limiter = 1.0;
double vy_limiter = 1.0;
double vz_limiter = 1.0;
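  // Barth-Jespersen style limiting (inferred from the structure): each
  // gradient is scaled so that values reconstructed at the subcell's node,
  // half edges, face centres and cell centre stay within the [gmin, gmax]
  // bounds gathered from the neighbourhood.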
const int sweep_subcell_to_faces_off =
subcells_to_faces_offsets[(sweep_subcell_index)];
const int nfaces_by_sweep_subcell =
subcells_to_faces_offsets[(sweep_subcell_index + 1)] -
sweep_subcell_to_faces_off;
// Limit at node
const int sweep_node_index = cells_to_nodes[(sweep_subcell_index)];
vec_t sweep_node = {nodes_x[(sweep_node_index)], nodes_y[(sweep_node_index)],
nodes_z[(sweep_node_index)]};
limit_mass_gradients(
sweep_node, &sweep_subcell_c, sweep_subcell_density,
sweep_subcell_ie_density, sweep_subcell_ke_density, subcell_v.x,
subcell_v.y, subcell_v.z, gmax_m, gmin_m, gmax_ie, gmin_ie, gmax_ke,
gmin_ke, gmax_vx, gmin_vx, gmax_vy, gmin_vy, gmax_vz, gmin_vz, &grad_m,
&grad_ie, &grad_ke, &grad_vx, &grad_vy, &grad_vz, &m_limiter, &ie_limiter,
&ke_limiter, &vx_limiter, &vy_limiter, &vz_limiter);
vec_t sweep_cell_c;
if (internal || is_outflux) {
sweep_cell_c = *cell_c;
} else {
// Faster or slower than accessing cell_centroids_... ?
const int cell_to_nodes_off = cells_to_nodes_offsets[(neighbour_cc)];
const int nnodes_by_cell =
cells_to_nodes_offsets[(neighbour_cc + 1)] - cell_to_nodes_off;
calc_centroid(nnodes_by_cell, nodes_x, nodes_y, nodes_z, cells_to_nodes,
cell_to_nodes_off, &sweep_cell_c);
}
// Limit at cell center
limit_mass_gradients(
sweep_cell_c, &sweep_subcell_c, sweep_subcell_density,
sweep_subcell_ie_density, sweep_subcell_ke_density, subcell_v.x,
subcell_v.y, subcell_v.z, gmax_m, gmin_m, gmax_ie, gmin_ie, gmax_ke,
gmin_ke, gmax_vx, gmin_vx, gmax_vy, gmin_vy, gmax_vz, gmin_vz, &grad_m,
&grad_ie, &grad_ke, &grad_vx, &grad_vy, &grad_vz, &m_limiter, &ie_limiter,
&ke_limiter, &vx_limiter, &vy_limiter, &vz_limiter);
// Limit at half edges and face centers
for (int ff2 = 0; ff2 < nfaces_by_sweep_subcell; ++ff2) {
    // Fetch the ff2-th face attached to the sweep subcell
    const int sweep_face_index =
        subcells_to_faces[(sweep_subcell_to_faces_off + ff2)];
const int sweep_face_to_nodes_off =
faces_to_nodes_offsets[(sweep_face_index)];
const int nnodes_by_sweep_face =
faces_to_nodes_offsets[(sweep_face_index + 1)] -
sweep_face_to_nodes_off;
// The face centroid is the same for all nodes on the face
vec_t sweep_face_c = {0.0, 0.0, 0.0};
calc_centroid(nnodes_by_sweep_face, nodes_x, nodes_y, nodes_z,
faces_to_nodes, sweep_face_to_nodes_off, &sweep_face_c);
limit_mass_gradients(
sweep_face_c, &sweep_subcell_c, sweep_subcell_density,
sweep_subcell_ie_density, sweep_subcell_ke_density, subcell_v.x,
subcell_v.y, subcell_v.z, gmax_m, gmin_m, gmax_ie, gmin_ie, gmax_ke,
gmin_ke, gmax_vx, gmin_vx, gmax_vy, gmin_vy, gmax_vz, gmin_vz, &grad_m,
&grad_ie, &grad_ke, &grad_vx, &grad_vy, &grad_vz, &m_limiter,
&ie_limiter, &ke_limiter, &vx_limiter, &vy_limiter, &vz_limiter);
// Determine the position of the node in the face list of nodes
int nn2;
for (nn2 = 0; nn2 < nnodes_by_sweep_face; ++nn2) {
if (faces_to_nodes[(sweep_face_to_nodes_off + nn2)] == sweep_node_index) {
break;
}
}
const int next_node = (nn2 == nnodes_by_sweep_face - 1) ? 0 : nn2 + 1;
const int prev_node = (nn2 == 0) ? nnodes_by_sweep_face - 1 : nn2 - 1;
const int face_clockwise =
(faces_cclockwise_cell[(sweep_face_index)] != cc);
const int rnode_off = (face_clockwise ? prev_node : next_node);
const int rnode_index =
faces_to_nodes[(sweep_face_to_nodes_off + rnode_off)];
// Get the halfway point on the right edge
vec_t half_edge = {0.5 * (sweep_node.x + nodes_x[(rnode_index)]),
0.5 * (sweep_node.y + nodes_y[(rnode_index)]),
0.5 * (sweep_node.z + nodes_z[(rnode_index)])};
    // Limit at the half edge
limit_mass_gradients(
half_edge, &sweep_subcell_c, sweep_subcell_density,
sweep_subcell_ie_density, sweep_subcell_ke_density, subcell_v.x,
subcell_v.y, subcell_v.z, gmax_m, gmin_m, gmax_ie, gmin_ie, gmax_ke,
gmin_ke, gmax_vx, gmin_vx, gmax_vy, gmin_vy, gmax_vz, gmin_vz, &grad_m,
&grad_ie, &grad_ke, &grad_vx, &grad_vy, &grad_vz, &m_limiter,
&ie_limiter, &ke_limiter, &vx_limiter, &vy_limiter, &vz_limiter);
}
grad_m.x *= m_limiter;
grad_m.y *= m_limiter;
grad_m.z *= m_limiter;
grad_ie.x *= ie_limiter;
grad_ie.y *= ie_limiter;
grad_ie.z *= ie_limiter;
grad_ke.x *= ke_limiter;
grad_ke.y *= ke_limiter;
grad_ke.z *= ke_limiter;
grad_vx.x *= vx_limiter;
grad_vx.y *= vx_limiter;
grad_vx.z *= vx_limiter;
grad_vy.x *= vy_limiter;
grad_vy.y *= vy_limiter;
grad_vy.z *= vy_limiter;
grad_vz.x *= vz_limiter;
grad_vz.y *= vz_limiter;
grad_vz.z *= vz_limiter;
const double dx = swept_edge_c.x - sweep_subcell_c.x;
const double dy = swept_edge_c.y - sweep_subcell_c.y;
const double dz = swept_edge_c.z - sweep_subcell_c.z;
// Calculate the fluxes for the different quantities
const double local_mass_flux =
swept_edge_vol *
(sweep_subcell_density + grad_m.x * dx + grad_m.y * dy + grad_m.z * dz);
const double local_ie_flux =
swept_edge_vol * (sweep_subcell_ie_density + grad_ie.x * dx +
grad_ie.y * dy + grad_ie.z * dz);
const double local_ke_flux =
swept_edge_vol * (sweep_subcell_ke_density + grad_ke.x * dx +
grad_ke.y * dy + grad_ke.z * dz);
const double local_x_momentum_flux =
swept_edge_vol *
(subcell_v.x + grad_vx.x * dx + grad_vx.y * dy + grad_vx.z * dz);
const double local_y_momentum_flux =
swept_edge_vol *
(subcell_v.y + grad_vy.x * dx + grad_vy.y * dy + grad_vy.z * dz);
const double local_z_momentum_flux =
swept_edge_vol *
(subcell_v.z + grad_vz.x * dx + grad_vz.y * dy + grad_vz.z * dz);
  // Mass, energy and momentum either flow into or out of the subcell
if (is_outflux) {
subcell_mass_flux[(subcell_index)] += local_mass_flux;
subcell_ie_mass_flux[(subcell_index)] += local_ie_flux;
subcell_ke_mass_flux[(subcell_index)] += local_ke_flux;
subcell_momentum_flux_x[(subcell_index)] += local_x_momentum_flux;
subcell_momentum_flux_y[(subcell_index)] += local_y_momentum_flux;
subcell_momentum_flux_z[(subcell_index)] += local_z_momentum_flux;
} else {
subcell_mass_flux[(subcell_index)] -= local_mass_flux;
subcell_ie_mass_flux[(subcell_index)] -= local_ie_flux;
subcell_ke_mass_flux[(subcell_index)] -= local_ke_flux;
subcell_momentum_flux_x[(subcell_index)] -= local_x_momentum_flux;
subcell_momentum_flux_y[(subcell_index)] -= local_y_momentum_flux;
subcell_momentum_flux_z[(subcell_index)] -= local_z_momentum_flux;
}
}
// Calculate the normal vector from the provided nodes
void calc_unit_normal(const int n0, const int n1, const int n2,
const double* nodes_x, const double* nodes_y,
const double* nodes_z, vec_t* normal) {
// Calculate the normal
calc_normal(n0, n1, n2, nodes_x, nodes_y, nodes_z, normal);
// Normalise the normal
double len = sqrt(normal->x * normal->x + normal->y * normal->y +
normal->z * normal->z);
  // Collapse a degenerate length to zero so that the division below
  // propagates the problem as inf/NaN rather than a spurious direction
if (len < EPS) {
len = 0.0;
}
normal->x /= len;
normal->y /= len;
normal->z /= len;
}
// Calculate the normal for a plane
void calc_normal(const int n0, const int n1, const int n2,
const double* nodes_x, const double* nodes_y,
const double* nodes_z, vec_t* normal) {
// Get two vectors on the face plane
vec_t dn0 = {0.0, 0.0, 0.0};
vec_t dn1 = {0.0, 0.0, 0.0};
// Outwards facing normal for clockwise ordering
dn0.x = nodes_x[(n0)] - nodes_x[(n1)];
dn0.y = nodes_y[(n0)] - nodes_y[(n1)];
dn0.z = nodes_z[(n0)] - nodes_z[(n1)];
dn1.x = nodes_x[(n2)] - nodes_x[(n1)];
dn1.y = nodes_y[(n2)] - nodes_y[(n1)];
dn1.z = nodes_z[(n2)] - nodes_z[(n1)];
// Cross product to get the normal
normal->x = (dn0.y * dn1.z - dn1.y * dn0.z);
normal->y = (dn0.z * dn1.x - dn1.z * dn0.x);
normal->z = (dn0.x * dn1.y - dn1.x * dn0.y);
}
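// Illustrative sketch, not part of the solver: exercising the two normal
// helpers above on a single triangle. The node arrays are assumptions for
// demonstration; for this triangle in the x-y plane the cross product in
// calc_normal yields (0, 0, -1), which is already unit length.
static void example_unit_normal() {
  const double ex_nodes_x[3] = {0.0, 1.0, 0.0};
  const double ex_nodes_y[3] = {0.0, 0.0, 1.0};
  const double ex_nodes_z[3] = {0.0, 0.0, 0.0};
  vec_t n = {0.0, 0.0, 0.0};
  calc_unit_normal(0, 1, 2, ex_nodes_x, ex_nodes_y, ex_nodes_z, &n);
  // n is now (0.0, 0.0, -1.0)
}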
// Contributes a face to the volume of some cell
// Expects a non-self-intersecting polyhedron; non-planar faces are allowed
void contribute_face_volume(const int nnodes_by_face, const int* faces_to_nodes,
const double* nodes_x, const double* nodes_y,
const double* nodes_z, const vec_t* cell_c,
double* vol) {
vec_t face_c = {0.0, 0.0, 0.0};
calc_centroid(nnodes_by_face, nodes_x, nodes_y, nodes_z, faces_to_nodes, 0,
&face_c);
for (int nn = 0; nn < nnodes_by_face; ++nn) {
// Fetch the nodes attached to our current node on the current face
const int current_node = faces_to_nodes[(nn)];
const int next_node = (nn + 1 < nnodes_by_face) ? faces_to_nodes[(nn + 1)]
: faces_to_nodes[(0)];
// Get the halfway point on the right edge
vec_t half_edge = {0.5 * (nodes_x[(current_node)] + nodes_x[(next_node)]),
0.5 * (nodes_y[(current_node)] + nodes_y[(next_node)]),
0.5 * (nodes_z[(current_node)] + nodes_z[(next_node)])};
// Setup basis on plane of tetrahedron
vec_t a = {(half_edge.x - face_c.x), (half_edge.y - face_c.y),
(half_edge.z - face_c.z)};
vec_t b = {(cell_c->x - face_c.x), (cell_c->y - face_c.y),
(cell_c->z - face_c.z)};
vec_t ab = {(half_edge.x - nodes_x[(current_node)]),
(half_edge.y - nodes_y[(current_node)]),
(half_edge.z - nodes_z[(current_node)])};
// Calculate the area vector S using cross product
vec_t S = {0.5 * (a.y * b.z - a.z * b.y), -0.5 * (a.x * b.z - a.z * b.x),
0.5 * (a.x * b.y - a.y * b.x)};
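    // Each term below is the volume of the tetrahedron (current_node,
    // half_edge, face_c, cell_c), i.e. |ab.S| / 3. The factor of two
    // accounts for the matching tetrahedron with apex next_node, which
    // sits at the same distance from the base plane and so has equal
    // volume (a reading of the geometry, not original documentation)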
*vol += 2.0 * fabs(ab.x * S.x + ab.y * S.y + ab.z * S.z) / 3.0;
}
}
// Calculates the volume of the provided cell by accumulating face contributions
void calc_volume(const int cell_to_faces_off, const int nfaces_by_cell,
const int* cells_to_faces, const int* faces_to_nodes,
const int* faces_to_nodes_offsets, const double* nodes_x,
const double* nodes_y, const double* nodes_z,
const vec_t* cell_c, double* vol) {
// Prepare to accumulate the volume
*vol = 0.0;
for (int ff = 0; ff < nfaces_by_cell; ++ff) {
const int face_index = cells_to_faces[(cell_to_faces_off + ff)];
const int face_to_nodes_off = faces_to_nodes_offsets[(face_index)];
const int nnodes_by_face =
faces_to_nodes_offsets[(face_index + 1)] - face_to_nodes_off;
contribute_face_volume(nnodes_by_face, &faces_to_nodes[(face_to_nodes_off)],
nodes_x, nodes_y, nodes_z, cell_c, vol);
if (isnan(*vol)) {
*vol = 0.0;
return;
}
}
*vol = fabs(*vol);
}
// Stores the rezoned mesh specification as the original mesh. Until a
// reasonable rezoning algorithm is determined, this makes the scheme Eulerian
void store_rezoned_mesh(const int nnodes, const double* nodes_x,
const double* nodes_y, const double* nodes_z,
double* rezoned_nodes_x, double* rezoned_nodes_y,
double* rezoned_nodes_z) {
// Store the rezoned nodes
#pragma omp parallel for
for (int nn = 0; nn < nnodes; ++nn) {
rezoned_nodes_x[(nn)] = nodes_x[(nn)];
rezoned_nodes_y[(nn)] = nodes_y[(nn)];
rezoned_nodes_z[(nn)] = nodes_z[(nn)];
}
}
// Calculates the inverse of a 3x3 matrix, out-of-place
void calc_3x3_inverse(vec_t (*a)[3], vec_t (*inv)[3]) {
// Calculate the determinant of the 3x3
const double det =
(*a)[0].x * ((*a)[1].y * (*a)[2].z - (*a)[1].z * (*a)[2].y) -
(*a)[0].y * ((*a)[1].x * (*a)[2].z - (*a)[1].z * (*a)[2].x) +
(*a)[0].z * ((*a)[1].x * (*a)[2].y - (*a)[1].y * (*a)[2].x);
// Check if the matrix is singular
if (det == 0.0) {
TERMINATE("singular coefficient matrix");
} else {
    // Perform the simple and fast 3x3 matrix inversion
(*inv)[0].x = ((*a)[1].y * (*a)[2].z - (*a)[1].z * (*a)[2].y) / det;
(*inv)[0].y = ((*a)[0].z * (*a)[2].y - (*a)[0].y * (*a)[2].z) / det;
(*inv)[0].z = ((*a)[0].y * (*a)[1].z - (*a)[0].z * (*a)[1].y) / det;
(*inv)[1].x = ((*a)[1].z * (*a)[2].x - (*a)[1].x * (*a)[2].z) / det;
(*inv)[1].y = ((*a)[0].x * (*a)[2].z - (*a)[0].z * (*a)[2].x) / det;
(*inv)[1].z = ((*a)[0].z * (*a)[1].x - (*a)[0].x * (*a)[1].z) / det;
(*inv)[2].x = ((*a)[1].x * (*a)[2].y - (*a)[1].y * (*a)[2].x) / det;
(*inv)[2].y = ((*a)[0].y * (*a)[2].x - (*a)[0].x * (*a)[2].y) / det;
(*inv)[2].z = ((*a)[0].x * (*a)[1].y - (*a)[0].y * (*a)[1].x) / det;
}
}
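// Illustrative sketch, not part of the solver: calc_3x3_inverse is used by
// the gradient estimation above, which applies the inverse row by row to
// each right-hand side. For the diagonal system diag(2,4,5) with rhs
// (2,4,5), the recovered solution is (1,1,1).
static void example_3x3_solve() {
  vec_t coeff[3] = {{2.0, 0.0, 0.0}, {0.0, 4.0, 0.0}, {0.0, 0.0, 5.0}};
  vec_t inv[3];
  calc_3x3_inverse(&coeff, &inv);
  vec_t rhs = {2.0, 4.0, 5.0};
  // Apply the inverse exactly as the gradient calculations do
  vec_t x = {inv[0].x * rhs.x + inv[0].y * rhs.y + inv[0].z * rhs.z,
             inv[1].x * rhs.x + inv[1].y * rhs.y + inv[1].z * rhs.z,
             inv[2].x * rhs.x + inv[2].y * rhs.y + inv[2].z * rhs.z};
  (void)x; // x == (1.0, 1.0, 1.0)
}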
// Calculates the local limiter for a cell
double calc_cell_limiter(const double rho, const double gmax, const double gmin,
vec_t* grad, const double node_x, const double node_y,
const double node_z, const vec_t* cell_c) {
double g_unlimited = rho + grad->x * (node_x - cell_c->x) +
grad->y * (node_y - cell_c->y) +
grad->z * (node_z - cell_c->z);
double limiter = 1.0;
if (g_unlimited - rho > 0.0) {
limiter = min(limiter, (gmax - rho) / (g_unlimited - rho));
} else if (g_unlimited - rho < 0.0) {
limiter = min(limiter, (gmin - rho) / (g_unlimited - rho));
}
return max(limiter, 0.0);
}
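// Worked example (illustrative): with rho = 1.0, gmax = 1.2, gmin = 0.8 and
// an unlimited node reconstruction g_unlimited = 1.5, the overshoot is
// clipped by limiter = (1.2 - 1.0) / (1.5 - 1.0) = 0.4, scaling the gradient
// so the reconstruction just reaches the neighbourhood maximum. This is a
// Barth-Jespersen style construction.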
// Calculates and applies the limiter for the provided gradient
double apply_cell_limiter(const int nnodes_by_cell, const int cell_to_nodes_off,
const int* cells_to_nodes, vec_t* grad,
const vec_t* cell_c, const double* nodes_x,
const double* nodes_y, const double* nodes_z,
const double rho, const double gmax,
const double gmin) {
// Calculate the limiter for the gradient
double limiter = 1.0;
for (int nn = 0; nn < nnodes_by_cell; ++nn) {
const int node_index = cells_to_nodes[(cell_to_nodes_off + nn)];
limiter = min(limiter, calc_cell_limiter(rho, gmax, gmin, grad,
nodes_x[(node_index)],
nodes_y[(node_index)],
nodes_z[(node_index)], cell_c));
}
grad->x *= limiter;
grad->y *= limiter;
grad->z *= limiter;
return limiter;
}
// Applies the mesh rezoning strategy. This is a pure Eulerian strategy.
void apply_mesh_rezoning(const int nnodes, const double* rezoned_nodes_x,
const double* rezoned_nodes_y,
const double* rezoned_nodes_z, double* nodes_x,
double* nodes_y, double* nodes_z) {
// Apply the rezoned mesh into the main mesh
#pragma omp parallel for
for (int nn = 0; nn < nnodes; ++nn) {
nodes_x[(nn)] = rezoned_nodes_x[(nn)];
nodes_y[(nn)] = rezoned_nodes_y[(nn)];
nodes_z[(nn)] = rezoned_nodes_z[(nn)];
}
}
// Limits all of the gradients during flux determination
void limit_mass_gradients(
vec_t nodes, vec_t* sweep_subcell_c, const double sweep_subcell_density,
const double sweep_subcell_ie_density,
const double sweep_subcell_ke_density, const double subcell_vx,
const double subcell_vy, const double subcell_vz, const double gmax_m,
const double gmin_m, const double gmax_ie, const double gmin_ie,
const double gmax_ke, const double gmin_ke, const double gmax_vx,
const double gmin_vx, const double gmax_vy, const double gmin_vy,
const double gmax_vz, const double gmin_vz, vec_t* grad_m, vec_t* grad_ie,
vec_t* grad_ke, vec_t* grad_vx, vec_t* grad_vy, vec_t* grad_vz,
double* m_limiter, double* ie_limiter, double* ke_limiter,
double* vx_limiter, double* vy_limiter, double* vz_limiter) {
*m_limiter =
min(*m_limiter,
calc_cell_limiter(sweep_subcell_density, gmax_m, gmin_m, grad_m,
nodes.x, nodes.y, nodes.z, sweep_subcell_c));
*ie_limiter =
min(*ie_limiter,
calc_cell_limiter(sweep_subcell_ie_density, gmax_ie, gmin_ie, grad_ie,
nodes.x, nodes.y, nodes.z, sweep_subcell_c));
*ke_limiter =
min(*ke_limiter,
calc_cell_limiter(sweep_subcell_ke_density, gmax_ke, gmin_ke, grad_ke,
nodes.x, nodes.y, nodes.z, sweep_subcell_c));
*vx_limiter = min(*vx_limiter, calc_cell_limiter(subcell_vx, gmax_vx, gmin_vx,
grad_vx, nodes.x, nodes.y,
nodes.z, sweep_subcell_c));
*vy_limiter = min(*vy_limiter, calc_cell_limiter(subcell_vy, gmax_vy, gmin_vy,
grad_vy, nodes.x, nodes.y,
nodes.z, sweep_subcell_c));
*vz_limiter = min(*vz_limiter, calc_cell_limiter(subcell_vz, gmax_vz, gmin_vz,
grad_vz, nodes.x, nodes.y,
nodes.z, sweep_subcell_c));
}
|
c-decl.c | /* Process declarations and variables for C compiler.
Copyright (C) 1988-2018 Free Software Foundation, Inc.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
/* Process declarations and symbol lookup for C front end.
Also constructs types; the standard scalar types at initialization,
and structure, union, array and enum types when they are declared. */
/* ??? not all decl nodes are given the most useful possible
line numbers. For example, the CONST_DECLs for enum values. */
#include "config.h"
#define INCLUDE_UNIQUE_PTR
#include "system.h"
#include "coretypes.h"
#include "target.h"
#include "function.h"
#include "c-tree.h"
#include "timevar.h"
#include "stringpool.h"
#include "cgraph.h"
#include "intl.h"
#include "print-tree.h"
#include "stor-layout.h"
#include "varasm.h"
#include "attribs.h"
#include "toplev.h"
#include "debug.h"
#include "c-family/c-objc.h"
#include "c-family/c-pragma.h"
#include "c-family/c-ubsan.h"
#include "c-lang.h"
#include "langhooks.h"
#include "tree-iterator.h"
#include "dumpfile.h"
#include "plugin.h"
#include "c-family/c-ada-spec.h"
#include "builtins.h"
#include "spellcheck-tree.h"
#include "gcc-rich-location.h"
#include "asan.h"
#include "c-family/name-hint.h"
#include "c-family/known-headers.h"
#include "c-family/c-spellcheck.h"
/* In grokdeclarator, distinguish syntactic contexts of declarators. */
enum decl_context
{ NORMAL, /* Ordinary declaration */
FUNCDEF, /* Function definition */
PARM, /* Declaration of parm before function body */
FIELD, /* Declaration inside struct or union */
TYPENAME}; /* Typename (inside cast or sizeof) */
/* States indicating how grokdeclarator() should handle declspecs marked
with __attribute__((deprecated)). An object declared as
__attribute__((deprecated)) suppresses warnings of uses of other
deprecated items. */
enum deprecated_states {
DEPRECATED_NORMAL,
DEPRECATED_SUPPRESS
};
/* Nonzero if we have seen an invalid cross reference
to a struct, union, or enum, but not yet printed the message. */
tree pending_invalid_xref;
/* File and line to appear in the eventual error message. */
location_t pending_invalid_xref_location;
/* The file and line that the prototype came from if this is an
old-style definition; used for diagnostics in
store_parm_decls_oldstyle. */
static location_t current_function_prototype_locus;
/* Whether this prototype was built-in. */
static bool current_function_prototype_built_in;
/* The argument type information of this prototype. */
static tree current_function_prototype_arg_types;
/* The argument information structure for the function currently being
defined. */
static struct c_arg_info *current_function_arg_info;
/* The obstack on which parser and related data structures, which are
not live beyond their top-level declaration or definition, are
allocated. */
struct obstack parser_obstack;
/* The current statement tree. */
static GTY(()) struct stmt_tree_s c_stmt_tree;
/* State saving variables. */
tree c_break_label;
tree c_cont_label;
/* A list of decls to be made automatically visible in each file scope. */
static GTY(()) tree visible_builtins;
/* Set to 0 at beginning of a function definition, set to 1 if
a return statement that specifies a return value is seen. */
int current_function_returns_value;
/* Set to 0 at beginning of a function definition, set to 1 if
a return statement with no argument is seen. */
int current_function_returns_null;
/* Set to 0 at beginning of a function definition, set to 1 if
a call to a noreturn function is seen. */
int current_function_returns_abnormally;
/* Set to nonzero by `grokdeclarator' for a function
whose return type is defaulted, if warnings for this are desired. */
static int warn_about_return_type;
/* Nonzero when the current toplevel function contains a declaration
of a nested function which is never defined. */
static bool undef_nested_function;
/* If non-zero, implicit "omp declare target" attribute is added into the
attribute lists. */
int current_omp_declare_target_attribute;
/* Each c_binding structure describes one binding of an identifier to
a decl. All the decls in a scope - irrespective of namespace - are
chained together by the ->prev field, which (as the name implies)
runs in reverse order. All the decls in a given namespace bound to
a given identifier are chained by the ->shadowed field, which runs
from inner to outer scopes.
The ->decl field usually points to a DECL node, but there are two
exceptions. In the namespace of type tags, the bound entity is a
RECORD_TYPE, UNION_TYPE, or ENUMERAL_TYPE node. If an undeclared
identifier is encountered, it is bound to error_mark_node to
suppress further errors about that identifier in the current
function.
The ->u.type field stores the type of the declaration in this scope;
if NULL, the type is the type of the ->decl field. This is only of
relevance for objects with external or internal linkage which may
be redeclared in inner scopes, forming composite types that only
persist for the duration of those scopes. In the external scope,
this stores the composite of all the types declared for this
object, visible or not. The ->inner_comp field (used only at file
scope) stores whether an incomplete array type at file scope was
completed at an inner scope to an array size other than 1.
The ->u.label field is used for labels. It points to a structure
which stores additional information used for warnings.
The depth field is copied from the scope structure that holds this
decl. It is used to preserve the proper ordering of the ->shadowed
field (see bind()) and also for a handful of special-case checks.
Finally, the invisible bit is true for a decl which should be
ignored for purposes of normal name lookup, and the nested bit is
true for a decl that's been bound a second time in an inner scope;
in all such cases, the binding in the outer scope will have its
invisible bit true. */
struct GTY((chain_next ("%h.prev"))) c_binding {
union GTY(()) { /* first so GTY desc can use decl */
tree GTY((tag ("0"))) type; /* the type in this scope */
struct c_label_vars * GTY((tag ("1"))) label; /* for warnings */
} GTY((desc ("TREE_CODE (%0.decl) == LABEL_DECL"))) u;
tree decl; /* the decl bound */
tree id; /* the identifier it's bound to */
struct c_binding *prev; /* the previous decl in this scope */
struct c_binding *shadowed; /* the innermost decl shadowed by this one */
unsigned int depth : 28; /* depth of this scope */
BOOL_BITFIELD invisible : 1; /* normal lookup should ignore this binding */
BOOL_BITFIELD nested : 1; /* do not set DECL_CONTEXT when popping */
BOOL_BITFIELD inner_comp : 1; /* incomplete array completed in inner scope */
BOOL_BITFIELD in_struct : 1; /* currently defined as struct field */
location_t locus; /* location for nested bindings */
};
#define B_IN_SCOPE(b1, b2) ((b1)->depth == (b2)->depth)
#define B_IN_CURRENT_SCOPE(b) ((b)->depth == current_scope->depth)
#define B_IN_FILE_SCOPE(b) ((b)->depth == 1 /*file_scope->depth*/)
#define B_IN_EXTERNAL_SCOPE(b) ((b)->depth == 0 /*external_scope->depth*/)
/* Each C symbol points to three linked lists of c_binding structures.
These describe the values of the identifier in the three different
namespaces defined by the language. */
struct GTY(()) lang_identifier {
struct c_common_identifier common_id;
struct c_binding *symbol_binding; /* vars, funcs, constants, typedefs */
struct c_binding *tag_binding; /* struct/union/enum tags */
struct c_binding *label_binding; /* labels */
};
/* Validate c-lang.c's assumptions. */
extern char C_SIZEOF_STRUCT_LANG_IDENTIFIER_isnt_accurate
[(sizeof(struct lang_identifier) == C_SIZEOF_STRUCT_LANG_IDENTIFIER) ? 1 : -1];
/* The binding oracle; see c-tree.h. */
void (*c_binding_oracle) (enum c_oracle_request, tree identifier);
/* This flag is set on an identifier if we have previously asked the
binding oracle for this identifier's symbol binding. */
#define I_SYMBOL_CHECKED(node) \
(TREE_LANG_FLAG_4 (IDENTIFIER_NODE_CHECK (node)))
static inline struct c_binding **
i_symbol_binding (tree node)
{
struct lang_identifier *lid
= (struct lang_identifier *) IDENTIFIER_NODE_CHECK (node);
if (lid->symbol_binding == NULL
&& c_binding_oracle != NULL
&& !I_SYMBOL_CHECKED (node))
{
/* Set the "checked" flag first, to avoid infinite recursion
when the binding oracle calls back into gcc. */
I_SYMBOL_CHECKED (node) = 1;
c_binding_oracle (C_ORACLE_SYMBOL, node);
}
return &lid->symbol_binding;
}
#define I_SYMBOL_BINDING(node) (*i_symbol_binding (node))
#define I_SYMBOL_DECL(node) \
(I_SYMBOL_BINDING(node) ? I_SYMBOL_BINDING(node)->decl : 0)
/* This flag is set on an identifier if we have previously asked the
binding oracle for this identifier's tag binding. */
#define I_TAG_CHECKED(node) \
(TREE_LANG_FLAG_5 (IDENTIFIER_NODE_CHECK (node)))
static inline struct c_binding **
i_tag_binding (tree node)
{
struct lang_identifier *lid
= (struct lang_identifier *) IDENTIFIER_NODE_CHECK (node);
if (lid->tag_binding == NULL
&& c_binding_oracle != NULL
&& !I_TAG_CHECKED (node))
{
/* Set the "checked" flag first, to avoid infinite recursion
when the binding oracle calls back into gcc. */
I_TAG_CHECKED (node) = 1;
c_binding_oracle (C_ORACLE_TAG, node);
}
return &lid->tag_binding;
}
#define I_TAG_BINDING(node) (*i_tag_binding (node))
#define I_TAG_DECL(node) \
(I_TAG_BINDING(node) ? I_TAG_BINDING(node)->decl : 0)
/* This flag is set on an identifier if we have previously asked the
binding oracle for this identifier's label binding. */
#define I_LABEL_CHECKED(node) \
(TREE_LANG_FLAG_6 (IDENTIFIER_NODE_CHECK (node)))
static inline struct c_binding **
i_label_binding (tree node)
{
struct lang_identifier *lid
= (struct lang_identifier *) IDENTIFIER_NODE_CHECK (node);
if (lid->label_binding == NULL
&& c_binding_oracle != NULL
&& !I_LABEL_CHECKED (node))
{
/* Set the "checked" flag first, to avoid infinite recursion
when the binding oracle calls back into gcc. */
I_LABEL_CHECKED (node) = 1;
c_binding_oracle (C_ORACLE_LABEL, node);
}
return &lid->label_binding;
}
#define I_LABEL_BINDING(node) (*i_label_binding (node))
#define I_LABEL_DECL(node) \
(I_LABEL_BINDING(node) ? I_LABEL_BINDING(node)->decl : 0)
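/* Illustrative sketch, not GCC code: the three accessors above share one
   lazy-lookup pattern -- ask an external oracle at most once per
   identifier, using a "checked" flag to remember that the question was
   already asked even when the answer was NULL, and setting the flag
   before the call so a recursive oracle cannot loop.  The names below
   are hypothetical.  */
static int demo_checked;
static int demo_answer;

static int
demo_lookup (int (*oracle) (void))
{
  if (!demo_answer && oracle != NULL && !demo_checked)
    {
      demo_checked = 1;  /* set first: the oracle may call back in */
      demo_answer = oracle ();
    }
  return demo_answer;
}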
/* The resulting tree type. */
union GTY((desc ("TREE_CODE (&%h.generic) == IDENTIFIER_NODE"),
chain_next ("(union lang_tree_node *) c_tree_chain_next (&%h.generic)"))) lang_tree_node
{
union tree_node GTY ((tag ("0"),
desc ("tree_node_structure (&%h)")))
generic;
struct lang_identifier GTY ((tag ("1"))) identifier;
};
/* Track bindings and other things that matter for goto warnings. For
efficiency, we do not gather all the decls at the point of
definition. Instead, we point into the bindings structure. As
scopes are popped, we update these structures and gather the decls
that matter at that time. */
struct GTY(()) c_spot_bindings {
/* The currently open scope which holds bindings defined when the
label was defined or the goto statement was found. */
struct c_scope *scope;
/* The bindings in the scope field which were defined at the point
of the label or goto. This lets us look at older or newer
bindings in the scope, as appropriate. */
struct c_binding *bindings_in_scope;
/* The number of statement expressions that have started since this
label or goto statement was defined. This is zero if we are at
the same statement expression level. It is positive if we are in
a statement expression started since this spot. It is negative
if this spot was in a statement expression and we have left
it. */
int stmt_exprs;
/* Whether we started in a statement expression but are no longer in
it. This is set to true if stmt_exprs ever goes negative. */
bool left_stmt_expr;
};
/* This structure is used to keep track of bindings seen when a goto
statement is defined. This is only used if we see the goto
statement before we see the label. */
struct GTY(()) c_goto_bindings {
/* The location of the goto statement. */
location_t loc;
/* The bindings of the goto statement. */
struct c_spot_bindings goto_bindings;
};
typedef struct c_goto_bindings *c_goto_bindings_p;
/* The additional information we keep track of for a label binding.
These fields are updated as scopes are popped. */
struct GTY(()) c_label_vars {
/* The shadowed c_label_vars, when one label shadows another (which
can only happen using a __label__ declaration). */
struct c_label_vars *shadowed;
/* The bindings when the label was defined. */
struct c_spot_bindings label_bindings;
/* A list of decls that we care about: decls about which we should
warn if a goto branches to this label from later in the function.
Decls are added to this list as scopes are popped. We only add
the decls that matter. */
vec<tree, va_gc> *decls_in_scope;
/* A list of goto statements to this label. This is only used for
goto statements seen before the label was defined, so that we can
issue appropriate warnings for them. */
vec<c_goto_bindings_p, va_gc> *gotos;
};
/* Each c_scope structure describes the complete contents of one
scope. Four scopes are distinguished specially: the innermost or
current scope, the innermost function scope, the file scope (always
the second to outermost) and the outermost or external scope.
Most declarations are recorded in the current scope.
All normal label declarations are recorded in the innermost
function scope, as are bindings of undeclared identifiers to
error_mark_node. (GCC permits nested functions as an extension,
hence the 'innermost' qualifier.) Explicitly declared labels
(using the __label__ extension) appear in the current scope.
Being in the file scope (current_scope == file_scope) causes
special behavior in several places below. Also, under some
conditions the Objective-C front end records declarations in the
file scope even though that isn't the current scope.
All declarations with external linkage are recorded in the external
scope, even if they aren't visible there; this models the fact that
such declarations are visible to the entire program, and (with a
bit of cleverness, see pushdecl) allows diagnosis of some violations
of C99 6.2.2p7 and 6.2.7p2:
If, within the same translation unit, the same identifier appears
with both internal and external linkage, the behavior is
undefined.
All declarations that refer to the same object or function shall
have compatible type; otherwise, the behavior is undefined.
Initially only the built-in declarations, which describe compiler
intrinsic functions plus a subset of the standard library, are in
this scope.
The order of the blocks list matters, and it is frequently appended
to. To avoid having to walk all the way to the end of the list on
each insertion, or reverse the list later, we maintain a pointer to
the last list entry. (FIXME: It should be feasible to use a reversed
list here.)
The bindings list is strictly in reverse order of declarations;
pop_scope relies on this. */
struct GTY((chain_next ("%h.outer"))) c_scope {
/* The scope containing this one. */
struct c_scope *outer;
/* The next outermost function scope. */
struct c_scope *outer_function;
/* All bindings in this scope. */
struct c_binding *bindings;
/* For each scope (except the global one), a chain of BLOCK nodes
for all the scopes that were entered and exited one level down. */
tree blocks;
tree blocks_last;
/* The depth of this scope. Used to keep the ->shadowed chain of
bindings sorted innermost to outermost. */
unsigned int depth : 28;
/* True if we are currently filling this scope with parameter
declarations. */
BOOL_BITFIELD parm_flag : 1;
  /* True if we saw [*] in this scope.  Used to give an error message
     if one appears in a function definition. */
BOOL_BITFIELD had_vla_unspec : 1;
/* True if we already complained about forward parameter decls
in this scope. This prevents double warnings on
foo (int a; int b; ...) */
BOOL_BITFIELD warned_forward_parm_decls : 1;
/* True if this is the outermost block scope of a function body.
This scope contains the parameters, the local variables declared
in the outermost block, and all the labels (except those in
nested functions, or declared at block scope with __label__). */
BOOL_BITFIELD function_body : 1;
/* True means make a BLOCK for this scope no matter what. */
BOOL_BITFIELD keep : 1;
/* True means that an unsuffixed float constant is _Decimal64. */
BOOL_BITFIELD float_const_decimal64 : 1;
/* True if this scope has any label bindings. This is used to speed
up searching for labels when popping scopes, particularly since
labels are normally only found at function scope. */
BOOL_BITFIELD has_label_bindings : 1;
/* True if we should issue a warning if a goto statement crosses any
of the bindings. We still need to check the list of bindings to
find the specific ones we need to warn about. This is true if
decl_jump_unsafe would return true for any of the bindings. This
is used to avoid looping over all the bindings unnecessarily. */
BOOL_BITFIELD has_jump_unsafe_decl : 1;
};
/* The scope currently in effect. */
static GTY(()) struct c_scope *current_scope;
/* The innermost function scope. Ordinary (not explicitly declared)
labels, bindings to error_mark_node, and the lazily-created
bindings of __func__ and its friends get this scope. */
static GTY(()) struct c_scope *current_function_scope;
/* The C file scope. This is reset for each input translation unit. */
static GTY(()) struct c_scope *file_scope;
/* The outermost scope. This is used for all declarations with
external linkage, and only these, hence the name. */
static GTY(()) struct c_scope *external_scope;
/* A chain of c_scope structures awaiting reuse. */
static GTY((deletable)) struct c_scope *scope_freelist;
/* A chain of c_binding structures awaiting reuse. */
static GTY((deletable)) struct c_binding *binding_freelist;
/* Append DECL to LIST in scope SCOPE. */
#define SCOPE_LIST_APPEND(scope, list, decl) do { \
struct c_scope *s_ = (scope); \
tree d_ = (decl); \
if (s_->list##_last) \
BLOCK_CHAIN (s_->list##_last) = d_; \
else \
s_->list = d_; \
s_->list##_last = d_; \
} while (0)
/* Concatenate FROM in scope FSCOPE onto TO in scope TSCOPE. */
#define SCOPE_LIST_CONCAT(tscope, to, fscope, from) do { \
struct c_scope *t_ = (tscope); \
struct c_scope *f_ = (fscope); \
if (t_->to##_last) \
BLOCK_CHAIN (t_->to##_last) = f_->from; \
else \
t_->to = f_->from; \
t_->to##_last = f_->from##_last; \
} while (0)
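/* Illustrative sketch, not GCC code: the list##_last bookkeeping above is
   the standard head-plus-tail-pointer idiom for O(1) append to a singly
   linked list.  The demo types are hypothetical.  */
struct demo_node { struct demo_node *chain; };
struct demo_list { struct demo_node *head, *tail; };

static void
demo_append (struct demo_list *l, struct demo_node *n)
{
  n->chain = NULL;
  if (l->tail)
    l->tail->chain = n;   /* link after the previous last entry */
  else
    l->head = n;          /* first entry becomes the head */
  l->tail = n;            /* the tail always points at the new entry */
}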
/* A c_inline_static structure stores details of a static identifier
referenced in a definition of a function that may be an inline
definition if no subsequent declaration of that function uses
"extern" or does not use "inline". */
struct GTY((chain_next ("%h.next"))) c_inline_static {
/* The location for a diagnostic. */
location_t location;
/* The function that may be an inline definition. */
tree function;
/* The object or function referenced. */
tree static_decl;
/* What sort of reference this is. */
enum c_inline_static_type type;
/* The next such structure or NULL. */
struct c_inline_static *next;
};
/* List of static identifiers used or referenced in functions that may
be inline definitions. */
static GTY(()) struct c_inline_static *c_inline_statics;
/* True means unconditionally make a BLOCK for the next scope pushed. */
static bool keep_next_level_flag;
/* True means the next call to push_scope will be the outermost scope
of a function body, so do not push a new scope, merely cease
expecting parameter decls. */
static bool next_is_function_body;
/* A vector of pointers to c_binding structures. */
typedef struct c_binding *c_binding_ptr;
/* Information that we keep for a struct or union while it is being
parsed. */
struct c_struct_parse_info
{
/* If warn_cxx_compat, a list of types defined within this
struct. */
auto_vec<tree> struct_types;
/* If warn_cxx_compat, a list of field names which have bindings,
and which are defined in this struct, but which are not defined
in any enclosing struct. This is used to clear the in_struct
field of the c_bindings structure. */
auto_vec<c_binding_ptr> fields;
/* If warn_cxx_compat, a list of typedef names used when defining
fields in this struct. */
auto_vec<tree> typedefs_seen;
};
/* Information for the struct or union currently being parsed, or
NULL if not parsing a struct or union. */
static struct c_struct_parse_info *struct_parse_info;
/* Forward declarations. */
static tree lookup_name_in_scope (tree, struct c_scope *);
static tree c_make_fname_decl (location_t, tree, int);
static tree grokdeclarator (const struct c_declarator *,
struct c_declspecs *,
enum decl_context, bool, tree *, tree *, tree *,
bool *, enum deprecated_states);
static tree grokparms (struct c_arg_info *, bool);
static void layout_array_type (tree);
static void warn_defaults_to (location_t, int, const char *, ...)
ATTRIBUTE_GCC_DIAG(3,4);
/* T is a statement. Add it to the statement-tree. This is the
C/ObjC version--C++ has a slightly different version of this
function. */
tree
add_stmt (tree t)
{
enum tree_code code = TREE_CODE (t);
if (CAN_HAVE_LOCATION_P (t) && code != LABEL_EXPR)
{
if (!EXPR_HAS_LOCATION (t))
SET_EXPR_LOCATION (t, input_location);
}
if (code == LABEL_EXPR || code == CASE_LABEL_EXPR)
STATEMENT_LIST_HAS_LABEL (cur_stmt_list) = 1;
/* Add T to the statement-tree. Non-side-effect statements need to be
recorded during statement expressions. */
if (!building_stmt_list_p ())
push_stmt_list ();
append_to_statement_list_force (t, &cur_stmt_list);
return t;
}
/* Build a pointer type using the default pointer mode. */
static tree
c_build_pointer_type (tree to_type)
{
  addr_space_t as = to_type == error_mark_node ? ADDR_SPACE_GENERIC
: TYPE_ADDR_SPACE (to_type);
machine_mode pointer_mode;
if (as != ADDR_SPACE_GENERIC || c_default_pointer_mode == VOIDmode)
pointer_mode = targetm.addr_space.pointer_mode (as);
else
pointer_mode = c_default_pointer_mode;
return build_pointer_type_for_mode (to_type, pointer_mode, false);
}
/* Return true if we will want to say something if a goto statement
crosses DECL. */
static bool
decl_jump_unsafe (tree decl)
{
if (error_operand_p (decl))
return false;
/* Always warn about crossing variably modified types. */
if ((VAR_P (decl) || TREE_CODE (decl) == TYPE_DECL)
&& variably_modified_type_p (TREE_TYPE (decl), NULL_TREE))
return true;
  /* Otherwise, only warn if -Wjump-misses-init and this is an
     initialized automatic decl. */
if (warn_jump_misses_init
&& VAR_P (decl)
&& !TREE_STATIC (decl)
&& DECL_INITIAL (decl) != NULL_TREE)
return true;
return false;
}
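/* For instance (illustrative input, compiled with -Wjump-misses-init):

     void f (int flag)
     {
       if (flag)
         goto out;
       int x = compute ();   // initialization skipped by the goto
     out:
       consume (x);          // x may be used uninitialized here
     }

   where compute and consume are hypothetical.  A variably modified
   type such as "int vla[n];" is diagnosed unconditionally, since
   jumping into its scope is invalid.  */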
void
c_print_identifier (FILE *file, tree node, int indent)
{
void (*save) (enum c_oracle_request, tree identifier);
/* Temporarily hide any binding oracle. Without this, calls to
debug_tree from the debugger will end up calling into the oracle,
making for a confusing debug session. As the oracle isn't needed
here for normal operation, it's simplest to suppress it. */
save = c_binding_oracle;
c_binding_oracle = NULL;
print_node (file, "symbol", I_SYMBOL_DECL (node), indent + 4);
print_node (file, "tag", I_TAG_DECL (node), indent + 4);
print_node (file, "label", I_LABEL_DECL (node), indent + 4);
if (C_IS_RESERVED_WORD (node) && C_RID_CODE (node) != RID_CXX_COMPAT_WARN)
{
tree rid = ridpointers[C_RID_CODE (node)];
indent_to (file, indent + 4);
fprintf (file, "rid " HOST_PTR_PRINTF " \"%s\"",
(void *) rid, IDENTIFIER_POINTER (rid));
}
c_binding_oracle = save;
}
/* Establish a binding between NAME, an IDENTIFIER_NODE, and DECL,
which may be any of several kinds of DECL or TYPE or error_mark_node,
in the scope SCOPE. */
static void
bind (tree name, tree decl, struct c_scope *scope, bool invisible,
bool nested, location_t locus)
{
struct c_binding *b, **here;
if (binding_freelist)
{
b = binding_freelist;
binding_freelist = b->prev;
}
else
b = ggc_alloc<c_binding> ();
b->shadowed = 0;
b->decl = decl;
b->id = name;
b->depth = scope->depth;
b->invisible = invisible;
b->nested = nested;
b->inner_comp = 0;
b->in_struct = 0;
b->locus = locus;
b->u.type = NULL;
b->prev = scope->bindings;
scope->bindings = b;
if (decl_jump_unsafe (decl))
scope->has_jump_unsafe_decl = 1;
if (!name)
return;
switch (TREE_CODE (decl))
{
case LABEL_DECL: here = &I_LABEL_BINDING (name); break;
case ENUMERAL_TYPE:
case UNION_TYPE:
case RECORD_TYPE: here = &I_TAG_BINDING (name); break;
case VAR_DECL:
case FUNCTION_DECL:
case TYPE_DECL:
case CONST_DECL:
case PARM_DECL:
case ERROR_MARK: here = &I_SYMBOL_BINDING (name); break;
default:
gcc_unreachable ();
}
/* Locate the appropriate place in the chain of shadowed decls
to insert this binding. Normally, scope == current_scope and
this does nothing. */
while (*here && (*here)->depth > scope->depth)
here = &(*here)->shadowed;
b->shadowed = *here;
*here = b;
}
/* Clear the binding structure B, stick it on the binding_freelist,
and return the former value of b->prev. This is used by pop_scope
and get_parm_info to iterate destructively over all the bindings
from a given scope. */
static struct c_binding *
free_binding_and_advance (struct c_binding *b)
{
struct c_binding *prev = b->prev;
memset (b, 0, sizeof (struct c_binding));
b->prev = binding_freelist;
binding_freelist = b;
return prev;
}
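/* Together with the allocation path in bind (), this forms a simple
   free-list allocator: recycled bindings are zeroed and chained onto
   binding_freelist through their prev field, so callers can walk a
   scope's bindings and return each node to the pool in a single
   pass.  */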
/* Bind a label. Like bind, but skip fields which aren't used for
labels, and add the LABEL_VARS value. */
static void
bind_label (tree name, tree label, struct c_scope *scope,
struct c_label_vars *label_vars)
{
struct c_binding *b;
bind (name, label, scope, /*invisible=*/false, /*nested=*/false,
UNKNOWN_LOCATION);
scope->has_label_bindings = true;
b = scope->bindings;
gcc_assert (b->decl == label);
label_vars->shadowed = b->u.label;
b->u.label = label_vars;
}
/* Hook called at end of compilation to assume 1 elt
for a file-scope tentative array defn that wasn't complete before. */
void
c_finish_incomplete_decl (tree decl)
{
if (VAR_P (decl))
{
tree type = TREE_TYPE (decl);
if (type != error_mark_node
&& TREE_CODE (type) == ARRAY_TYPE
&& !DECL_EXTERNAL (decl)
&& TYPE_DOMAIN (type) == NULL_TREE)
{
warning_at (DECL_SOURCE_LOCATION (decl),
0, "array %q+D assumed to have one element", decl);
complete_array_type (&TREE_TYPE (decl), NULL_TREE, true);
relayout_decl (decl);
}
}
}
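/* For instance (illustrative input):

     int arr[];   // file-scope tentative definition, incomplete type

   If nothing later in the translation unit completes the type, the
   code above completes it as though it were "int arr[1]" and emits
   the warning.  */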
/* Record that inline function FUNC contains a reference (location
LOC) to static DECL (file-scope or function-local according to
TYPE). */
void
record_inline_static (location_t loc, tree func, tree decl,
enum c_inline_static_type type)
{
c_inline_static *csi = ggc_alloc<c_inline_static> ();
csi->location = loc;
csi->function = func;
csi->static_decl = decl;
csi->type = type;
csi->next = c_inline_statics;
c_inline_statics = csi;
}
/* Check for references to static declarations in inline functions at
the end of the translation unit and diagnose them if the functions
are still inline definitions. */
static void
check_inline_statics (void)
{
struct c_inline_static *csi;
for (csi = c_inline_statics; csi; csi = csi->next)
{
if (DECL_EXTERNAL (csi->function))
switch (csi->type)
{
case csi_internal:
pedwarn (csi->location, 0,
"%qD is static but used in inline function %qD "
"which is not static", csi->static_decl, csi->function);
break;
case csi_modifiable:
pedwarn (csi->location, 0,
"%q+D is static but declared in inline function %qD "
"which is not static", csi->static_decl, csi->function);
break;
default:
gcc_unreachable ();
}
}
c_inline_statics = NULL;
}
/* Fill in a c_spot_bindings structure. If DEFINING is true, set it
for the current state, otherwise set it to uninitialized. */
static void
set_spot_bindings (struct c_spot_bindings *p, bool defining)
{
if (defining)
{
p->scope = current_scope;
p->bindings_in_scope = current_scope->bindings;
}
else
{
p->scope = NULL;
p->bindings_in_scope = NULL;
}
p->stmt_exprs = 0;
p->left_stmt_expr = false;
}
/* Update spot bindings P as we pop out of SCOPE. Return true if we
should push decls for a label. */
static bool
update_spot_bindings (struct c_scope *scope, struct c_spot_bindings *p)
{
if (p->scope != scope)
{
/* This label or goto is defined in some other scope, or it is a
label which is not yet defined. There is nothing to
update. */
return false;
}
/* Adjust the spot bindings to refer to the bindings already defined
in the enclosing scope. */
p->scope = scope->outer;
p->bindings_in_scope = p->scope->bindings;
return true;
}
/* The Objective-C front-end often needs to determine the current scope. */
void *
objc_get_current_scope (void)
{
return current_scope;
}
/* The following function is used only by Objective-C. It needs to live here
because it accesses the innards of c_scope. */
void
objc_mark_locals_volatile (void *enclosing_blk)
{
struct c_scope *scope;
struct c_binding *b;
for (scope = current_scope;
scope && scope != enclosing_blk;
scope = scope->outer)
{
for (b = scope->bindings; b; b = b->prev)
objc_volatilize_decl (b->decl);
/* Do not climb up past the current function. */
if (scope->function_body)
break;
}
}
/* Return true if we are in the global binding level. */
bool
global_bindings_p (void)
{
return current_scope == file_scope;
}
void
keep_next_level (void)
{
keep_next_level_flag = true;
}
/* Set the flag for the FLOAT_CONST_DECIMAL64 pragma being ON. */
void
set_float_const_decimal64 (void)
{
current_scope->float_const_decimal64 = true;
}
/* Clear the flag for the FLOAT_CONST_DECIMAL64 pragma. */
void
clear_float_const_decimal64 (void)
{
current_scope->float_const_decimal64 = false;
}
/* Return nonzero if an unsuffixed float constant is _Decimal64. */
bool
float_const_decimal64_p (void)
{
return current_scope->float_const_decimal64;
}
/* Identify this scope as currently being filled with parameters. */
void
declare_parm_level (void)
{
current_scope->parm_flag = true;
}
void
push_scope (void)
{
if (next_is_function_body)
{
/* This is the transition from the parameters to the top level
of the function body. These are the same scope
(C99 6.2.1p4,6) so we do not push another scope structure.
next_is_function_body is set only by store_parm_decls, which
in turn is called when and only when we are about to
encounter the opening curly brace for the function body.
The outermost block of a function always gets a BLOCK node,
because the debugging output routines expect that each
function has at least one BLOCK. */
current_scope->parm_flag = false;
current_scope->function_body = true;
current_scope->keep = true;
current_scope->outer_function = current_function_scope;
current_function_scope = current_scope;
keep_next_level_flag = false;
next_is_function_body = false;
/* The FLOAT_CONST_DECIMAL64 pragma applies to nested scopes. */
if (current_scope->outer)
current_scope->float_const_decimal64
= current_scope->outer->float_const_decimal64;
else
current_scope->float_const_decimal64 = false;
}
else
{
struct c_scope *scope;
if (scope_freelist)
{
scope = scope_freelist;
scope_freelist = scope->outer;
}
else
scope = ggc_cleared_alloc<c_scope> ();
/* The FLOAT_CONST_DECIMAL64 pragma applies to nested scopes. */
if (current_scope)
scope->float_const_decimal64 = current_scope->float_const_decimal64;
else
scope->float_const_decimal64 = false;
scope->keep = keep_next_level_flag;
scope->outer = current_scope;
scope->depth = current_scope ? (current_scope->depth + 1) : 0;
/* Check for scope depth overflow. Unlikely (2^28 == 268,435,456) but
possible. */
if (current_scope && scope->depth == 0)
{
scope->depth--;
sorry ("GCC supports only %u nested scopes", scope->depth);
}
current_scope = scope;
keep_next_level_flag = false;
}
}
/* This is called when we are leaving SCOPE. For each label defined
in SCOPE, add any appropriate decls to its decls_in_scope fields.
These are the decls whose initialization will be skipped by a goto
later in the function. */
static void
update_label_decls (struct c_scope *scope)
{
struct c_scope *s;
s = scope;
while (s != NULL)
{
if (s->has_label_bindings)
{
struct c_binding *b;
for (b = s->bindings; b != NULL; b = b->prev)
{
struct c_label_vars *label_vars;
struct c_binding *b1;
bool hjud;
unsigned int ix;
struct c_goto_bindings *g;
if (TREE_CODE (b->decl) != LABEL_DECL)
continue;
label_vars = b->u.label;
b1 = label_vars->label_bindings.bindings_in_scope;
if (label_vars->label_bindings.scope == NULL)
hjud = false;
else
hjud = label_vars->label_bindings.scope->has_jump_unsafe_decl;
if (update_spot_bindings (scope, &label_vars->label_bindings))
{
/* This label is defined in this scope. */
if (hjud)
{
for (; b1 != NULL; b1 = b1->prev)
{
/* A goto from later in the function to this
label will never see the initialization
of B1, if any. Save it to issue a
warning if needed. */
if (decl_jump_unsafe (b1->decl))
vec_safe_push(label_vars->decls_in_scope, b1->decl);
}
}
}
/* Update the bindings of any goto statements associated
with this label. */
FOR_EACH_VEC_SAFE_ELT (label_vars->gotos, ix, g)
update_spot_bindings (scope, &g->goto_bindings);
}
}
/* Don't search beyond the current function. */
if (s == current_function_scope)
break;
s = s->outer;
}
}
/* Set the TYPE_CONTEXT of all of TYPE's variants to CONTEXT. */
static void
set_type_context (tree type, tree context)
{
for (type = TYPE_MAIN_VARIANT (type); type;
type = TYPE_NEXT_VARIANT (type))
TYPE_CONTEXT (type) = context;
}
/* Exit a scope. Restore the state of the identifier-decl mappings
that were in effect when this scope was entered. Return a BLOCK
node containing all the DECLs in this scope that are of interest
to debug info generation. */
tree
pop_scope (void)
{
struct c_scope *scope = current_scope;
tree block, context, p;
struct c_binding *b;
bool functionbody = scope->function_body;
bool keep = functionbody || scope->keep || scope->bindings;
update_label_decls (scope);
/* If appropriate, create a BLOCK to record the decls for the life
of this function. */
block = NULL_TREE;
if (keep)
{
block = make_node (BLOCK);
BLOCK_SUBBLOCKS (block) = scope->blocks;
TREE_USED (block) = 1;
/* In each subblock, record that this is its superior. */
for (p = scope->blocks; p; p = BLOCK_CHAIN (p))
BLOCK_SUPERCONTEXT (p) = block;
BLOCK_VARS (block) = NULL_TREE;
}
/* The TYPE_CONTEXTs for all of the tagged types belonging to this
scope must be set so that they point to the appropriate
construct, i.e. either to the current FUNCTION_DECL node, or
else to the BLOCK node we just constructed.
Note that for tagged types whose scope is just the formal
parameter list for some function type specification, we can't
properly set their TYPE_CONTEXTs here, because we don't have a
pointer to the appropriate FUNCTION_TYPE node readily available
to us. For those cases, the TYPE_CONTEXTs of the relevant tagged
type nodes get set in `grokdeclarator' as soon as we have created
the FUNCTION_TYPE node which will represent the "scope" for these
"parameter list local" tagged types. */
if (scope->function_body)
context = current_function_decl;
else if (scope == file_scope)
{
tree file_decl
= build_translation_unit_decl (get_identifier (main_input_filename));
context = file_decl;
debug_hooks->register_main_translation_unit (file_decl);
}
else
context = block;
/* Clear all bindings in this scope. */
for (b = scope->bindings; b; b = free_binding_and_advance (b))
{
p = b->decl;
switch (TREE_CODE (p))
{
case LABEL_DECL:
/* Warnings for unused labels, errors for undefined labels. */
if (TREE_USED (p) && !DECL_INITIAL (p))
{
error ("label %q+D used but not defined", p);
DECL_INITIAL (p) = error_mark_node;
}
else
warn_for_unused_label (p);
/* Labels go in BLOCK_VARS. */
DECL_CHAIN (p) = BLOCK_VARS (block);
BLOCK_VARS (block) = p;
gcc_assert (I_LABEL_BINDING (b->id) == b);
I_LABEL_BINDING (b->id) = b->shadowed;
/* Also pop back to the shadowed label_vars. */
release_tree_vector (b->u.label->decls_in_scope);
b->u.label = b->u.label->shadowed;
break;
case ENUMERAL_TYPE:
case UNION_TYPE:
case RECORD_TYPE:
set_type_context (p, context);
/* Types may not have tag-names, in which case the type
appears in the bindings list with b->id NULL. */
if (b->id)
{
gcc_assert (I_TAG_BINDING (b->id) == b);
I_TAG_BINDING (b->id) = b->shadowed;
}
break;
case FUNCTION_DECL:
/* Propagate TREE_ADDRESSABLE from nested functions to their
containing functions. */
if (!TREE_ASM_WRITTEN (p)
&& DECL_INITIAL (p) != NULL_TREE
&& TREE_ADDRESSABLE (p)
&& DECL_ABSTRACT_ORIGIN (p) != NULL_TREE
&& DECL_ABSTRACT_ORIGIN (p) != p)
TREE_ADDRESSABLE (DECL_ABSTRACT_ORIGIN (p)) = 1;
if (!TREE_PUBLIC (p)
&& !DECL_INITIAL (p)
&& !b->nested
&& scope != file_scope
&& scope != external_scope)
{
error ("nested function %q+D declared but never defined", p);
undef_nested_function = true;
}
else if (DECL_DECLARED_INLINE_P (p)
&& TREE_PUBLIC (p)
&& !DECL_INITIAL (p))
{
/* C99 6.7.4p6: "a function with external linkage... declared
with an inline function specifier ... shall also be defined
in the same translation unit." */
if (!flag_gnu89_inline
&& !lookup_attribute ("gnu_inline", DECL_ATTRIBUTES (p))
&& scope == external_scope)
pedwarn (input_location, 0,
"inline function %q+D declared but never defined", p);
DECL_EXTERNAL (p) = 1;
}
goto common_symbol;
case VAR_DECL:
/* Warnings for unused variables. */
if ((!TREE_USED (p) || !DECL_READ_P (p))
&& !TREE_NO_WARNING (p)
&& !DECL_IN_SYSTEM_HEADER (p)
&& DECL_NAME (p)
&& !DECL_ARTIFICIAL (p)
&& scope != file_scope
&& scope != external_scope)
{
if (!TREE_USED (p))
warning (OPT_Wunused_variable, "unused variable %q+D", p);
else if (DECL_CONTEXT (p) == current_function_decl)
warning_at (DECL_SOURCE_LOCATION (p),
OPT_Wunused_but_set_variable,
"variable %qD set but not used", p);
}
if (b->inner_comp)
{
error ("type of array %q+D completed incompatibly with"
" implicit initialization", p);
}
/* Fall through. */
case TYPE_DECL:
case CONST_DECL:
common_symbol:
/* All of these go in BLOCK_VARS, but only if this is the
binding in the home scope. */
if (!b->nested)
{
DECL_CHAIN (p) = BLOCK_VARS (block);
BLOCK_VARS (block) = p;
}
else if (VAR_OR_FUNCTION_DECL_P (p) && scope != file_scope)
{
/* For block local externs add a special
DECL_EXTERNAL decl for debug info generation. */
tree extp = copy_node (p);
DECL_EXTERNAL (extp) = 1;
TREE_STATIC (extp) = 0;
TREE_PUBLIC (extp) = 1;
DECL_INITIAL (extp) = NULL_TREE;
DECL_LANG_SPECIFIC (extp) = NULL;
DECL_CONTEXT (extp) = current_function_decl;
if (TREE_CODE (p) == FUNCTION_DECL)
{
DECL_RESULT (extp) = NULL_TREE;
DECL_SAVED_TREE (extp) = NULL_TREE;
DECL_STRUCT_FUNCTION (extp) = NULL;
}
if (b->locus != UNKNOWN_LOCATION)
DECL_SOURCE_LOCATION (extp) = b->locus;
DECL_CHAIN (extp) = BLOCK_VARS (block);
BLOCK_VARS (block) = extp;
}
/* If this is the file scope set DECL_CONTEXT of each decl to
the TRANSLATION_UNIT_DECL. This makes same_translation_unit_p
work. */
if (scope == file_scope)
{
DECL_CONTEXT (p) = context;
if (TREE_CODE (p) == TYPE_DECL
&& TREE_TYPE (p) != error_mark_node)
set_type_context (TREE_TYPE (p), context);
}
gcc_fallthrough ();
/* Parameters go in DECL_ARGUMENTS, not BLOCK_VARS, and have
already been put there by store_parm_decls. Unused-
parameter warnings are handled by function.c.
error_mark_node obviously does not go in BLOCK_VARS and
does not get unused-variable warnings. */
case PARM_DECL:
case ERROR_MARK:
/* It is possible for a decl not to have a name. We get
here with b->id NULL in this case. */
if (b->id)
{
gcc_assert (I_SYMBOL_BINDING (b->id) == b);
I_SYMBOL_BINDING (b->id) = b->shadowed;
if (b->shadowed && b->shadowed->u.type)
TREE_TYPE (b->shadowed->decl) = b->shadowed->u.type;
}
break;
default:
gcc_unreachable ();
}
}
/* Dispose of the block that we just made inside some higher level. */
if ((scope->function_body || scope == file_scope) && context)
{
DECL_INITIAL (context) = block;
BLOCK_SUPERCONTEXT (block) = context;
}
else if (scope->outer)
{
if (block)
SCOPE_LIST_APPEND (scope->outer, blocks, block);
/* If we did not make a block for the scope just exited, any
blocks made for inner scopes must be carried forward so they
will later become subblocks of something else. */
else if (scope->blocks)
SCOPE_LIST_CONCAT (scope->outer, blocks, scope, blocks);
}
/* Pop the current scope, and free the structure for reuse. */
current_scope = scope->outer;
if (scope->function_body)
current_function_scope = scope->outer_function;
memset (scope, 0, sizeof (struct c_scope));
scope->outer = scope_freelist;
scope_freelist = scope;
return block;
}
void
push_file_scope (void)
{
tree decl;
if (file_scope)
return;
push_scope ();
file_scope = current_scope;
start_fname_decls ();
for (decl = visible_builtins; decl; decl = DECL_CHAIN (decl))
bind (DECL_NAME (decl), decl, file_scope,
/*invisible=*/false, /*nested=*/true, DECL_SOURCE_LOCATION (decl));
}
void
pop_file_scope (void)
{
  /* In case there were missing close braces, get us back to the global
     binding level. */
while (current_scope != file_scope)
pop_scope ();
/* __FUNCTION__ is defined at file scope (""). This
call may not be necessary as my tests indicate it
still works without it. */
finish_fname_decls ();
check_inline_statics ();
/* This is the point to write out a PCH if we're doing that.
In that case we do not want to do anything else. */
if (pch_file)
{
c_common_write_pch ();
/* Ensure even the callers don't try to finalize the CU. */
flag_syntax_only = 1;
return;
}
/* Pop off the file scope and close this translation unit. */
pop_scope ();
file_scope = 0;
maybe_apply_pending_pragma_weaks ();
}
/* Adjust the bindings for the start of a statement expression. */
void
c_bindings_start_stmt_expr (struct c_spot_bindings* switch_bindings)
{
struct c_scope *scope;
for (scope = current_scope; scope != NULL; scope = scope->outer)
{
struct c_binding *b;
if (!scope->has_label_bindings)
continue;
for (b = scope->bindings; b != NULL; b = b->prev)
{
struct c_label_vars *label_vars;
unsigned int ix;
struct c_goto_bindings *g;
if (TREE_CODE (b->decl) != LABEL_DECL)
continue;
label_vars = b->u.label;
++label_vars->label_bindings.stmt_exprs;
FOR_EACH_VEC_SAFE_ELT (label_vars->gotos, ix, g)
++g->goto_bindings.stmt_exprs;
}
}
if (switch_bindings != NULL)
++switch_bindings->stmt_exprs;
}
/* Adjust the bindings for the end of a statement expression. */
void
c_bindings_end_stmt_expr (struct c_spot_bindings *switch_bindings)
{
struct c_scope *scope;
for (scope = current_scope; scope != NULL; scope = scope->outer)
{
struct c_binding *b;
if (!scope->has_label_bindings)
continue;
for (b = scope->bindings; b != NULL; b = b->prev)
{
struct c_label_vars *label_vars;
unsigned int ix;
struct c_goto_bindings *g;
if (TREE_CODE (b->decl) != LABEL_DECL)
continue;
label_vars = b->u.label;
--label_vars->label_bindings.stmt_exprs;
if (label_vars->label_bindings.stmt_exprs < 0)
{
label_vars->label_bindings.left_stmt_expr = true;
label_vars->label_bindings.stmt_exprs = 0;
}
FOR_EACH_VEC_SAFE_ELT (label_vars->gotos, ix, g)
{
--g->goto_bindings.stmt_exprs;
if (g->goto_bindings.stmt_exprs < 0)
{
g->goto_bindings.left_stmt_expr = true;
g->goto_bindings.stmt_exprs = 0;
}
}
}
}
if (switch_bindings != NULL)
{
--switch_bindings->stmt_exprs;
gcc_assert (switch_bindings->stmt_exprs >= 0);
}
}
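/* Illustrative example (an editor's sketch, not part of the original
   source): these counters exist to catch jumps that cross a GNU
   statement-expression boundary.  Given user code such as

       int f (int c)
       {
         int x = ({ lab: 1; });
         if (c) goto lab;
         return x;
       }

   the label "lab" is created inside the ({ ... }), so when the
   statement expression ends its stmt_exprs count goes negative and
   left_stmt_expr is set above; the later "goto lab" can then be
   diagnosed as a jump into a statement expression. */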
/* Push a definition or a declaration of struct, union or enum tag "name".
"type" should be the type node.
We assume that the tag "name" is not already defined, and has a location
of LOC.
Note that the definition may really be just a forward reference.
In that case, the TYPE_SIZE will be zero. */
static void
pushtag (location_t loc, tree name, tree type)
{
/* Record the identifier as the type's name if it has none. */
if (name && !TYPE_NAME (type))
TYPE_NAME (type) = name;
bind (name, type, current_scope, /*invisible=*/false, /*nested=*/false, loc);
/* Create a fake NULL-named TYPE_DECL node whose TREE_TYPE will be the
tagged type we just added to the current scope. This fake
NULL-named TYPE_DECL node helps dwarfout.c to know when it needs
to output a representation of a tagged type, and it also gives
us a convenient place to record the "scope start" address for the
tagged type. */
TYPE_STUB_DECL (type) = pushdecl (build_decl (loc,
TYPE_DECL, NULL_TREE, type));
/* An approximation for now, so we can tell this is a function-scope tag.
This will be updated in pop_scope. */
TYPE_CONTEXT (type) = DECL_CONTEXT (TYPE_STUB_DECL (type));
if (warn_cxx_compat && name != NULL_TREE)
{
struct c_binding *b = I_SYMBOL_BINDING (name);
if (b != NULL
&& b->decl != NULL_TREE
&& TREE_CODE (b->decl) == TYPE_DECL
&& (B_IN_CURRENT_SCOPE (b)
|| (current_scope == file_scope && B_IN_EXTERNAL_SCOPE (b)))
&& (TYPE_MAIN_VARIANT (TREE_TYPE (b->decl))
!= TYPE_MAIN_VARIANT (type)))
{
if (warning_at (loc, OPT_Wc___compat,
("using %qD as both a typedef and a tag is "
"invalid in C++"), b->decl)
&& b->locus != UNKNOWN_LOCATION)
inform (b->locus, "originally defined here");
}
}
}
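/* Illustrative example (editor's sketch): the -Wc++-compat check above
   warns for a translation unit such as

       typedef int list;
       struct list { struct list *next; };

   which is valid C, since tags live in their own name space, but is
   ill-formed in C++, where "list" would name both the typedef and the
   class. */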
/* An exported interface to pushtag. This is used by the gdb plugin's
binding oracle to introduce a new tag binding. */
void
c_pushtag (location_t loc, tree name, tree type)
{
pushtag (loc, name, type);
}
/* An exported interface to bind a declaration. LOC is the location
to use. DECL is the declaration to bind. The decl's name is used
to determine how it is bound. If DECL is a VAR_DECL, then
IS_GLOBAL determines whether the decl is put into the global (file
and external) scope or the current function's scope; if DECL is not
a VAR_DECL then it is always put into the file scope. */
void
c_bind (location_t loc, tree decl, bool is_global)
{
struct c_scope *scope;
bool nested = false;
if (!VAR_P (decl) || current_function_scope == NULL)
{
/* Types and functions are always considered to be global. */
scope = file_scope;
DECL_EXTERNAL (decl) = 1;
TREE_PUBLIC (decl) = 1;
}
else if (is_global)
{
/* Also bind it into the external scope. */
bind (DECL_NAME (decl), decl, external_scope, true, false, loc);
nested = true;
scope = file_scope;
DECL_EXTERNAL (decl) = 1;
TREE_PUBLIC (decl) = 1;
}
else
{
DECL_CONTEXT (decl) = current_function_decl;
TREE_PUBLIC (decl) = 0;
scope = current_function_scope;
}
bind (DECL_NAME (decl), decl, scope, false, nested, loc);
}
/* Subroutine of compare_decls. Allow harmless mismatches in return
and argument types provided that the type modes match. This function
returns a unified type given a suitable match, and NULL_TREE otherwise. */
static tree
match_builtin_function_types (tree newtype, tree oldtype)
{
tree newrettype, oldrettype;
tree newargs, oldargs;
tree trytype, tryargs;
/* Accept the return type of the new declaration if same modes. */
oldrettype = TREE_TYPE (oldtype);
newrettype = TREE_TYPE (newtype);
if (TYPE_MODE (oldrettype) != TYPE_MODE (newrettype))
return NULL_TREE;
oldargs = TYPE_ARG_TYPES (oldtype);
newargs = TYPE_ARG_TYPES (newtype);
tryargs = newargs;
while (oldargs || newargs)
{
if (!oldargs
|| !newargs
|| !TREE_VALUE (oldargs)
|| !TREE_VALUE (newargs)
|| TYPE_MODE (TREE_VALUE (oldargs))
!= TYPE_MODE (TREE_VALUE (newargs)))
return NULL_TREE;
oldargs = TREE_CHAIN (oldargs);
newargs = TREE_CHAIN (newargs);
}
trytype = build_function_type (newrettype, tryargs);
/* Allow declaration to change transaction_safe attribute. */
tree oldattrs = TYPE_ATTRIBUTES (oldtype);
tree oldtsafe = lookup_attribute ("transaction_safe", oldattrs);
tree newattrs = TYPE_ATTRIBUTES (newtype);
tree newtsafe = lookup_attribute ("transaction_safe", newattrs);
if (oldtsafe && !newtsafe)
oldattrs = remove_attribute ("transaction_safe", oldattrs);
else if (newtsafe && !oldtsafe)
oldattrs = tree_cons (get_identifier ("transaction_safe"),
NULL_TREE, oldattrs);
return build_type_attribute_variant (trytype, oldattrs);
}
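/* Illustrative example (editor's sketch): on a target where int and
   long share a machine mode (e.g. a typical ILP32 target), a
   declaration such as

       int ffs (long);

   differs from the built-in "int ffs (int)" only in a mode-preserving
   way, so the function above unifies the two types instead of
   reporting conflicting types for the built-in. */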
/* Subroutine of diagnose_mismatched_decls. Check for function type
mismatch involving an empty arglist vs a nonempty one and give clearer
diagnostics. */
static void
diagnose_arglist_conflict (tree newdecl, tree olddecl,
tree newtype, tree oldtype)
{
tree t;
if (TREE_CODE (olddecl) != FUNCTION_DECL
|| !comptypes (TREE_TYPE (oldtype), TREE_TYPE (newtype))
|| !((!prototype_p (oldtype) && DECL_INITIAL (olddecl) == NULL_TREE)
|| (!prototype_p (newtype) && DECL_INITIAL (newdecl) == NULL_TREE)))
return;
t = TYPE_ARG_TYPES (oldtype);
if (t == NULL_TREE)
t = TYPE_ARG_TYPES (newtype);
for (; t; t = TREE_CHAIN (t))
{
tree type = TREE_VALUE (t);
if (TREE_CHAIN (t) == NULL_TREE
&& TYPE_MAIN_VARIANT (type) != void_type_node)
{
inform (input_location, "a parameter list with an ellipsis can%'t match "
"an empty parameter name list declaration");
break;
}
if (c_type_promotes_to (type) != type)
{
inform (input_location, "an argument type that has a default promotion can%'t match "
"an empty parameter name list declaration");
break;
}
}
}
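/* Illustrative examples (editor's sketch) of the two notes emitted
   above:

       int f ();
       int f (float x);            float promotes to double, so this
                                   prototype can't match "int f ()"

       int g ();
       int g (const char *, ...);  an ellipsis can't match an empty
                                   parameter name list declaration

   In both cases the error itself ("conflicting types for ...") is
   issued by the caller; this routine only adds the explanation. */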
/* Another subroutine of diagnose_mismatched_decls. OLDDECL is an
old-style function definition, NEWDECL is a prototype declaration.
Diagnose inconsistencies in the argument list. Returns TRUE if
the prototype is compatible, FALSE if not. */
static bool
validate_proto_after_old_defn (tree newdecl, tree newtype, tree oldtype)
{
tree newargs, oldargs;
int i;
#define END_OF_ARGLIST(t) ((t) == void_type_node)
oldargs = TYPE_ACTUAL_ARG_TYPES (oldtype);
newargs = TYPE_ARG_TYPES (newtype);
i = 1;
for (;;)
{
tree oldargtype = TREE_VALUE (oldargs);
tree newargtype = TREE_VALUE (newargs);
if (oldargtype == error_mark_node || newargtype == error_mark_node)
return false;
oldargtype = (TYPE_ATOMIC (oldargtype)
? c_build_qualified_type (TYPE_MAIN_VARIANT (oldargtype),
TYPE_QUAL_ATOMIC)
: TYPE_MAIN_VARIANT (oldargtype));
newargtype = (TYPE_ATOMIC (newargtype)
? c_build_qualified_type (TYPE_MAIN_VARIANT (newargtype),
TYPE_QUAL_ATOMIC)
: TYPE_MAIN_VARIANT (newargtype));
if (END_OF_ARGLIST (oldargtype) && END_OF_ARGLIST (newargtype))
break;
/* Reaching the end of just one list means the two decls don't
agree on the number of arguments. */
if (END_OF_ARGLIST (oldargtype))
{
error ("prototype for %q+D declares more arguments "
"than previous old-style definition", newdecl);
return false;
}
else if (END_OF_ARGLIST (newargtype))
{
error ("prototype for %q+D declares fewer arguments "
"than previous old-style definition", newdecl);
return false;
}
/* Type for passing arg must be consistent with that declared
for the arg. */
else if (!comptypes (oldargtype, newargtype))
{
error ("prototype for %q+D declares argument %d"
" with incompatible type",
newdecl, i);
return false;
}
oldargs = TREE_CHAIN (oldargs);
newargs = TREE_CHAIN (newargs);
i++;
}
/* If we get here, no errors were found, but do issue a warning
for this poor-style construct. */
warning (0, "prototype for %q+D follows non-prototype definition",
newdecl);
return true;
#undef END_OF_ARGLIST
}
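/* Illustrative example (editor's sketch):

       int f (x) int x; { return x; }     old-style definition
       int f (int);                       accepted, but warns that a
                                          prototype follows a
                                          non-prototype definition

   whereas a mismatched prototype such as "int f (long);" or
   "int f (int, int);" is rejected with one of the errors above. */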
/* Subroutine of diagnose_mismatched_decls. Report the location of DECL,
the first in a pair of mismatched declarations. */
static void
locate_old_decl (tree decl)
{
if (TREE_CODE (decl) == FUNCTION_DECL && DECL_BUILT_IN (decl)
&& !C_DECL_DECLARED_BUILTIN (decl))
;
else if (DECL_INITIAL (decl))
inform (input_location, "previous definition of %q+D was here", decl);
else if (C_DECL_IMPLICIT (decl))
inform (input_location, "previous implicit declaration of %q+D was here", decl);
else
inform (input_location, "previous declaration of %q+D was here", decl);
}
/* Subroutine of duplicate_decls. Compare NEWDECL to OLDDECL.
Returns true if the caller should proceed to merge the two, false
if OLDDECL should simply be discarded. As a side effect, issues
all necessary diagnostics for invalid or poor-style combinations.
If it returns true, writes the types of NEWDECL and OLDDECL to
*NEWTYPEP and *OLDTYPEP - these may have been adjusted from
TREE_TYPE (NEWDECL, OLDDECL) respectively. */
static bool
diagnose_mismatched_decls (tree newdecl, tree olddecl,
tree *newtypep, tree *oldtypep)
{
tree newtype, oldtype;
bool pedwarned = false;
bool warned = false;
bool retval = true;
#define DECL_EXTERN_INLINE(DECL) (DECL_DECLARED_INLINE_P (DECL) \
&& DECL_EXTERNAL (DECL))
/* If we have error_mark_node for either decl or type, just discard
the previous decl - we're in an error cascade already. */
if (olddecl == error_mark_node || newdecl == error_mark_node)
return false;
*oldtypep = oldtype = TREE_TYPE (olddecl);
*newtypep = newtype = TREE_TYPE (newdecl);
if (oldtype == error_mark_node || newtype == error_mark_node)
return false;
/* Two different categories of symbol altogether. This is an error
unless OLDDECL is a builtin. OLDDECL will be discarded in any case. */
if (TREE_CODE (olddecl) != TREE_CODE (newdecl))
{
if (!(TREE_CODE (olddecl) == FUNCTION_DECL
&& DECL_BUILT_IN (olddecl)
&& !C_DECL_DECLARED_BUILTIN (olddecl)))
{
error ("%q+D redeclared as different kind of symbol", newdecl);
locate_old_decl (olddecl);
}
else if (TREE_PUBLIC (newdecl))
warning (OPT_Wbuiltin_declaration_mismatch,
"built-in function %q+D declared as non-function",
newdecl);
else
warning (OPT_Wshadow, "declaration of %q+D shadows "
"a built-in function", newdecl);
return false;
}
/* Enumerators have no linkage, so may only be declared once in a
given scope. */
if (TREE_CODE (olddecl) == CONST_DECL)
{
error ("redeclaration of enumerator %q+D", newdecl);
locate_old_decl (olddecl);
return false;
}
if (!comptypes (oldtype, newtype))
{
if (TREE_CODE (olddecl) == FUNCTION_DECL
&& DECL_BUILT_IN (olddecl) && !C_DECL_DECLARED_BUILTIN (olddecl))
{
/* Accept harmless mismatch in function types.
This is for the ffs and fprintf builtins. */
tree trytype = match_builtin_function_types (newtype, oldtype);
if (trytype && comptypes (newtype, trytype))
*oldtypep = oldtype = trytype;
else
{
/* If types don't match for a built-in, throw away the
built-in. No point in calling locate_old_decl here, it
won't print anything. */
warning (OPT_Wbuiltin_declaration_mismatch,
"conflicting types for built-in function %q+D",
newdecl);
return false;
}
}
else if (TREE_CODE (olddecl) == FUNCTION_DECL
&& DECL_IS_BUILTIN (olddecl))
{
/* A conflicting function declaration for a predeclared
function that isn't actually built in. Objective C uses
these. The new declaration silently overrides everything
but the volatility (i.e. noreturn) indication. See also
below. FIXME: Make Objective C use normal builtins. */
TREE_THIS_VOLATILE (newdecl) |= TREE_THIS_VOLATILE (olddecl);
return false;
}
/* Permit void foo (...) to match int foo (...) if the latter is
the definition and implicit int was used. See
c-torture/compile/920625-2.c. */
else if (TREE_CODE (newdecl) == FUNCTION_DECL && DECL_INITIAL (newdecl)
&& TYPE_MAIN_VARIANT (TREE_TYPE (oldtype)) == void_type_node
&& TYPE_MAIN_VARIANT (TREE_TYPE (newtype)) == integer_type_node
&& C_FUNCTION_IMPLICIT_INT (newdecl) && !DECL_INITIAL (olddecl))
{
pedwarned = pedwarn (input_location, 0,
"conflicting types for %q+D", newdecl);
/* Make sure we keep void as the return type. */
TREE_TYPE (newdecl) = *newtypep = newtype = oldtype;
C_FUNCTION_IMPLICIT_INT (newdecl) = 0;
}
/* Permit void foo (...) to match an earlier call to foo (...) with
no declared type (thus, implicitly int). */
else if (TREE_CODE (newdecl) == FUNCTION_DECL
&& TYPE_MAIN_VARIANT (TREE_TYPE (newtype)) == void_type_node
&& TYPE_MAIN_VARIANT (TREE_TYPE (oldtype)) == integer_type_node
&& C_DECL_IMPLICIT (olddecl) && !DECL_INITIAL (olddecl))
{
pedwarned = pedwarn (input_location, 0,
"conflicting types for %q+D", newdecl);
/* Make sure we keep void as the return type. */
TREE_TYPE (olddecl) = *oldtypep = oldtype = newtype;
}
else
{
int new_quals = TYPE_QUALS (newtype);
int old_quals = TYPE_QUALS (oldtype);
if (new_quals != old_quals)
{
addr_space_t new_addr = DECODE_QUAL_ADDR_SPACE (new_quals);
addr_space_t old_addr = DECODE_QUAL_ADDR_SPACE (old_quals);
if (new_addr != old_addr)
{
if (ADDR_SPACE_GENERIC_P (new_addr))
error ("conflicting named address spaces (generic vs %s) "
"for %q+D",
c_addr_space_name (old_addr), newdecl);
else if (ADDR_SPACE_GENERIC_P (old_addr))
error ("conflicting named address spaces (%s vs generic) "
"for %q+D",
c_addr_space_name (new_addr), newdecl);
else
error ("conflicting named address spaces (%s vs %s) "
"for %q+D",
c_addr_space_name (new_addr),
c_addr_space_name (old_addr),
newdecl);
}
if (CLEAR_QUAL_ADDR_SPACE (new_quals)
!= CLEAR_QUAL_ADDR_SPACE (old_quals))
error ("conflicting type qualifiers for %q+D", newdecl);
}
else
error ("conflicting types for %q+D", newdecl);
diagnose_arglist_conflict (newdecl, olddecl, newtype, oldtype);
locate_old_decl (olddecl);
return false;
}
}
/* Redeclaration of a type is a constraint violation (6.7.2.3p1),
but silently ignore the redeclaration if either is in a system
header. (Conflicting redeclarations were handled above.) This
is allowed for C11 if the types are the same, not just
compatible. */
if (TREE_CODE (newdecl) == TYPE_DECL)
{
bool types_different = false;
int comptypes_result;
comptypes_result
= comptypes_check_different_types (oldtype, newtype, &types_different);
if (comptypes_result != 1 || types_different)
{
error ("redefinition of typedef %q+D with different type", newdecl);
locate_old_decl (olddecl);
return false;
}
if (DECL_IN_SYSTEM_HEADER (newdecl)
|| DECL_IN_SYSTEM_HEADER (olddecl)
|| TREE_NO_WARNING (newdecl)
|| TREE_NO_WARNING (olddecl))
return true; /* Allow OLDDECL to continue in use. */
if (variably_modified_type_p (newtype, NULL))
{
error ("redefinition of typedef %q+D with variably modified type",
newdecl);
locate_old_decl (olddecl);
}
else if (pedwarn_c99 (input_location, OPT_Wpedantic,
"redefinition of typedef %q+D", newdecl))
locate_old_decl (olddecl);
return true;
}
/* Function declarations can either be 'static' or 'extern' (no
qualifier is equivalent to 'extern' - C99 6.2.2p5) and therefore
can never conflict with each other on account of linkage
(6.2.2p4). Multiple definitions are not allowed (6.9p3,5) but
gnu89 mode permits two definitions if one is 'extern inline' and
one is not. The non-extern-inline definition supersedes the
extern-inline definition. */
else if (TREE_CODE (newdecl) == FUNCTION_DECL)
{
/* If you declare a built-in function name as static, or
define the built-in with an old-style definition (so we
can't validate the argument list), the built-in definition is
overridden, but we may optionally warn that this was a bad
choice of name. */
if (DECL_BUILT_IN (olddecl)
&& !C_DECL_DECLARED_BUILTIN (olddecl)
&& (!TREE_PUBLIC (newdecl)
|| (DECL_INITIAL (newdecl)
&& !prototype_p (TREE_TYPE (newdecl)))))
{
warning (OPT_Wshadow, "declaration of %q+D shadows "
"a built-in function", newdecl);
/* Discard the old built-in function. */
return false;
}
if (DECL_INITIAL (newdecl))
{
if (DECL_INITIAL (olddecl))
{
/* If both decls are in the same TU and the new declaration
isn't overriding an extern inline, reject the new decl.
In C99, no overriding is allowed in the same translation
unit. */
if ((!DECL_EXTERN_INLINE (olddecl)
|| DECL_EXTERN_INLINE (newdecl)
|| (!flag_gnu89_inline
&& (!DECL_DECLARED_INLINE_P (olddecl)
|| !lookup_attribute ("gnu_inline",
DECL_ATTRIBUTES (olddecl)))
&& (!DECL_DECLARED_INLINE_P (newdecl)
|| !lookup_attribute ("gnu_inline",
DECL_ATTRIBUTES (newdecl))))
)
&& same_translation_unit_p (newdecl, olddecl))
{
error ("redefinition of %q+D", newdecl);
locate_old_decl (olddecl);
return false;
}
}
}
/* If we have a prototype after an old-style function definition,
the argument types must be checked specially. */
else if (DECL_INITIAL (olddecl)
&& !prototype_p (oldtype) && prototype_p (newtype)
&& TYPE_ACTUAL_ARG_TYPES (oldtype)
&& !validate_proto_after_old_defn (newdecl, newtype, oldtype))
{
locate_old_decl (olddecl);
return false;
}
/* A non-static declaration (even an "extern") followed by a
static declaration is undefined behavior per C99 6.2.2p3-5,7.
The same is true for a static forward declaration at block
scope followed by a non-static declaration/definition at file
scope. Static followed by non-static at the same scope is
not undefined behavior, and is the most convenient way to get
some effects (see e.g. what unwind-dw2-fde-glibc.c does to
the definition of _Unwind_Find_FDE in unwind-dw2-fde.c), but
we do diagnose it if -Wtraditional. */
if (TREE_PUBLIC (olddecl) && !TREE_PUBLIC (newdecl))
{
/* Two exceptions to the rule. If olddecl is an extern
inline, or a predeclared function that isn't actually
built in, newdecl silently overrides olddecl. The latter
occur only in Objective C; see also above. (FIXME: Make
Objective C use normal builtins.) */
if (!DECL_IS_BUILTIN (olddecl)
&& !DECL_EXTERN_INLINE (olddecl))
{
error ("static declaration of %q+D follows "
"non-static declaration", newdecl);
locate_old_decl (olddecl);
}
return false;
}
else if (TREE_PUBLIC (newdecl) && !TREE_PUBLIC (olddecl))
{
if (DECL_CONTEXT (olddecl))
{
error ("non-static declaration of %q+D follows "
"static declaration", newdecl);
locate_old_decl (olddecl);
return false;
}
else if (warn_traditional)
{
warned |= warning (OPT_Wtraditional,
"non-static declaration of %q+D "
"follows static declaration", newdecl);
}
}
/* Make sure gnu_inline attribute is either not present, or
present on all inline decls. */
if (DECL_DECLARED_INLINE_P (olddecl)
&& DECL_DECLARED_INLINE_P (newdecl))
{
bool newa = lookup_attribute ("gnu_inline",
DECL_ATTRIBUTES (newdecl)) != NULL;
bool olda = lookup_attribute ("gnu_inline",
DECL_ATTRIBUTES (olddecl)) != NULL;
if (newa != olda)
{
error_at (input_location, "%<gnu_inline%> attribute present on %q+D",
newa ? newdecl : olddecl);
error_at (DECL_SOURCE_LOCATION (newa ? olddecl : newdecl),
"but not here");
}
}
}
else if (VAR_P (newdecl))
{
/* Only variables can be thread-local, and all declarations must
agree on this property. */
if (C_DECL_THREADPRIVATE_P (olddecl) && !DECL_THREAD_LOCAL_P (newdecl))
{
/* Nothing to check. Since OLDDECL is marked threadprivate
and NEWDECL does not have a thread-local attribute, we
will merge the threadprivate attribute into NEWDECL. */
;
}
else if (DECL_THREAD_LOCAL_P (newdecl) != DECL_THREAD_LOCAL_P (olddecl))
{
if (DECL_THREAD_LOCAL_P (newdecl))
error ("thread-local declaration of %q+D follows "
"non-thread-local declaration", newdecl);
else
error ("non-thread-local declaration of %q+D follows "
"thread-local declaration", newdecl);
locate_old_decl (olddecl);
return false;
}
/* Multiple initialized definitions are not allowed (6.9p3,5). */
if (DECL_INITIAL (newdecl) && DECL_INITIAL (olddecl))
{
error ("redefinition of %q+D", newdecl);
locate_old_decl (olddecl);
return false;
}
/* Objects declared at file scope: if the first declaration had
external linkage (even if it was an external reference) the
second must have external linkage as well, or the behavior is
undefined. If the first declaration had internal linkage, then
the second must too, or else be an external reference (in which
case the composite declaration still has internal linkage).
As for function declarations, we warn about the static-then-
extern case only for -Wtraditional. See generally 6.2.2p3-5,7. */
if (DECL_FILE_SCOPE_P (newdecl)
&& TREE_PUBLIC (newdecl) != TREE_PUBLIC (olddecl))
{
if (DECL_EXTERNAL (newdecl))
{
if (!DECL_FILE_SCOPE_P (olddecl))
{
error ("extern declaration of %q+D follows "
"declaration with no linkage", newdecl);
locate_old_decl (olddecl);
return false;
}
else if (warn_traditional)
{
warned |= warning (OPT_Wtraditional,
"non-static declaration of %q+D "
"follows static declaration", newdecl);
}
}
else
{
if (TREE_PUBLIC (newdecl))
error ("non-static declaration of %q+D follows "
"static declaration", newdecl);
else
error ("static declaration of %q+D follows "
"non-static declaration", newdecl);
locate_old_decl (olddecl);
return false;
}
}
/* Two objects with the same name declared at the same block
scope must both be external references (6.7p3). */
else if (!DECL_FILE_SCOPE_P (newdecl))
{
if (DECL_EXTERNAL (newdecl))
{
/* Extern with initializer at block scope, which will
already have received an error. */
}
else if (DECL_EXTERNAL (olddecl))
{
error ("declaration of %q+D with no linkage follows "
"extern declaration", newdecl);
locate_old_decl (olddecl);
}
else
{
error ("redeclaration of %q+D with no linkage", newdecl);
locate_old_decl (olddecl);
}
return false;
}
/* C++ does not permit a decl to appear multiple times at file
scope. */
if (warn_cxx_compat
&& DECL_FILE_SCOPE_P (newdecl)
&& !DECL_EXTERNAL (newdecl)
&& !DECL_EXTERNAL (olddecl))
warned |= warning_at (DECL_SOURCE_LOCATION (newdecl),
OPT_Wc___compat,
("duplicate declaration of %qD is "
"invalid in C++"),
newdecl);
}
/* warnings */
/* All decls must agree on a visibility. */
if (CODE_CONTAINS_STRUCT (TREE_CODE (newdecl), TS_DECL_WITH_VIS)
&& DECL_VISIBILITY_SPECIFIED (newdecl) && DECL_VISIBILITY_SPECIFIED (olddecl)
&& DECL_VISIBILITY (newdecl) != DECL_VISIBILITY (olddecl))
{
warned |= warning (0, "redeclaration of %q+D with different visibility "
"(old visibility preserved)", newdecl);
}
if (TREE_CODE (newdecl) == FUNCTION_DECL)
warned |= diagnose_mismatched_attributes (olddecl, newdecl);
else /* PARM_DECL, VAR_DECL */
{
/* Redeclaration of a parameter is a constraint violation (this is
not explicitly stated, but follows from C99 6.7p3 [no more than
one declaration of the same identifier with no linkage in the
same scope, except type tags] and 6.2.2p6 [parameters have no
linkage]). We must check for a forward parameter declaration,
indicated by TREE_ASM_WRITTEN on the old declaration - this is
an extension, the mandatory diagnostic for which is handled by
mark_forward_parm_decls. */
if (TREE_CODE (newdecl) == PARM_DECL
&& (!TREE_ASM_WRITTEN (olddecl) || TREE_ASM_WRITTEN (newdecl)))
{
error ("redefinition of parameter %q+D", newdecl);
locate_old_decl (olddecl);
return false;
}
}
/* Optional warning for completely redundant decls. */
if (!warned && !pedwarned
&& warn_redundant_decls
/* Don't warn about a function declaration followed by a
definition. */
&& !(TREE_CODE (newdecl) == FUNCTION_DECL
&& DECL_INITIAL (newdecl) && !DECL_INITIAL (olddecl))
/* Don't warn about redundant redeclarations of builtins. */
&& !(TREE_CODE (newdecl) == FUNCTION_DECL
&& !DECL_BUILT_IN (newdecl)
&& DECL_BUILT_IN (olddecl)
&& !C_DECL_DECLARED_BUILTIN (olddecl))
/* Don't warn about an extern followed by a definition. */
&& !(DECL_EXTERNAL (olddecl) && !DECL_EXTERNAL (newdecl))
/* Don't warn about forward parameter decls. */
&& !(TREE_CODE (newdecl) == PARM_DECL
&& TREE_ASM_WRITTEN (olddecl) && !TREE_ASM_WRITTEN (newdecl))
/* Don't warn about a variable definition following a declaration. */
&& !(VAR_P (newdecl)
&& DECL_INITIAL (newdecl) && !DECL_INITIAL (olddecl)))
{
warned = warning (OPT_Wredundant_decls, "redundant redeclaration of %q+D",
newdecl);
}
/* Report location of previous decl/defn. */
if (warned || pedwarned)
locate_old_decl (olddecl);
#undef DECL_EXTERN_INLINE
return retval;
}
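/* Illustrative example (editor's sketch) of the redundancy warning
   above:

       extern int nfiles;
       extern int nfiles;   second decl adds nothing: with
                            -Wredundant-decls, "redundant
                            redeclaration of 'nfiles'"
       int nfiles;          no warning: an extern followed by a
                            definition is explicitly excluded */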
/* Subroutine of duplicate_decls. NEWDECL has been found to be
consistent with OLDDECL, but carries new information. Merge the
new information into OLDDECL. This function issues no
diagnostics. */
static void
merge_decls (tree newdecl, tree olddecl, tree newtype, tree oldtype)
{
bool new_is_definition = (TREE_CODE (newdecl) == FUNCTION_DECL
&& DECL_INITIAL (newdecl) != NULL_TREE);
bool new_is_prototype = (TREE_CODE (newdecl) == FUNCTION_DECL
&& prototype_p (TREE_TYPE (newdecl)));
bool old_is_prototype = (TREE_CODE (olddecl) == FUNCTION_DECL
&& prototype_p (TREE_TYPE (olddecl)));
/* For real parm decl following a forward decl, rechain the old decl
in its new location and clear TREE_ASM_WRITTEN (it's not a
forward decl anymore). */
if (TREE_CODE (newdecl) == PARM_DECL
&& TREE_ASM_WRITTEN (olddecl) && !TREE_ASM_WRITTEN (newdecl))
{
struct c_binding *b, **here;
for (here = &current_scope->bindings; *here; here = &(*here)->prev)
if ((*here)->decl == olddecl)
goto found;
gcc_unreachable ();
found:
b = *here;
*here = b->prev;
b->prev = current_scope->bindings;
current_scope->bindings = b;
TREE_ASM_WRITTEN (olddecl) = 0;
}
DECL_ATTRIBUTES (newdecl)
= targetm.merge_decl_attributes (olddecl, newdecl);
/* For typedefs use the old type, as the new type's DECL_NAME points
at newdecl, which will be ggc_freed. */
if (TREE_CODE (newdecl) == TYPE_DECL)
{
/* But NEWTYPE might have an attribute, honor that. */
tree tem = newtype;
newtype = oldtype;
if (TYPE_USER_ALIGN (tem))
{
if (TYPE_ALIGN (tem) > TYPE_ALIGN (newtype))
SET_TYPE_ALIGN (newtype, TYPE_ALIGN (tem));
TYPE_USER_ALIGN (newtype) = true;
}
/* And remove the new type from the variants list. */
if (TYPE_NAME (TREE_TYPE (newdecl)) == newdecl)
{
tree remove = TREE_TYPE (newdecl);
for (tree t = TYPE_MAIN_VARIANT (remove); ;
t = TYPE_NEXT_VARIANT (t))
if (TYPE_NEXT_VARIANT (t) == remove)
{
TYPE_NEXT_VARIANT (t) = TYPE_NEXT_VARIANT (remove);
break;
}
}
}
/* Merge the data types specified in the two decls. */
TREE_TYPE (newdecl)
= TREE_TYPE (olddecl)
= composite_type (newtype, oldtype);
/* Lay the type out, unless already done. */
if (!comptypes (oldtype, TREE_TYPE (newdecl)))
{
if (TREE_TYPE (newdecl) != error_mark_node)
layout_type (TREE_TYPE (newdecl));
if (TREE_CODE (newdecl) != FUNCTION_DECL
&& TREE_CODE (newdecl) != TYPE_DECL
&& TREE_CODE (newdecl) != CONST_DECL)
layout_decl (newdecl, 0);
}
else
{
/* Since the type is OLDDECL's, make OLDDECL's size go with. */
DECL_SIZE (newdecl) = DECL_SIZE (olddecl);
DECL_SIZE_UNIT (newdecl) = DECL_SIZE_UNIT (olddecl);
SET_DECL_MODE (newdecl, DECL_MODE (olddecl));
if (DECL_ALIGN (olddecl) > DECL_ALIGN (newdecl))
{
SET_DECL_ALIGN (newdecl, DECL_ALIGN (olddecl));
DECL_USER_ALIGN (newdecl) |= DECL_USER_ALIGN (olddecl);
}
if (DECL_WARN_IF_NOT_ALIGN (olddecl)
> DECL_WARN_IF_NOT_ALIGN (newdecl))
SET_DECL_WARN_IF_NOT_ALIGN (newdecl,
DECL_WARN_IF_NOT_ALIGN (olddecl));
}
/* Keep the old rtl since we can safely use it. */
if (HAS_RTL_P (olddecl))
COPY_DECL_RTL (olddecl, newdecl);
/* Merge the type qualifiers. */
if (TREE_READONLY (newdecl))
TREE_READONLY (olddecl) = 1;
if (TREE_THIS_VOLATILE (newdecl))
TREE_THIS_VOLATILE (olddecl) = 1;
/* Merge deprecatedness. */
if (TREE_DEPRECATED (newdecl))
TREE_DEPRECATED (olddecl) = 1;
/* If a decl is in a system header and the other isn't, keep the one on the
system header. Otherwise, keep source location of definition rather than
declaration and of prototype rather than non-prototype unless that
prototype is built-in. */
if (CODE_CONTAINS_STRUCT (TREE_CODE (olddecl), TS_DECL_WITH_VIS)
&& DECL_IN_SYSTEM_HEADER (olddecl)
&& !DECL_IN_SYSTEM_HEADER (newdecl) )
DECL_SOURCE_LOCATION (newdecl) = DECL_SOURCE_LOCATION (olddecl);
else if (CODE_CONTAINS_STRUCT (TREE_CODE (olddecl), TS_DECL_WITH_VIS)
&& DECL_IN_SYSTEM_HEADER (newdecl)
&& !DECL_IN_SYSTEM_HEADER (olddecl))
DECL_SOURCE_LOCATION (olddecl) = DECL_SOURCE_LOCATION (newdecl);
else if ((DECL_INITIAL (newdecl) == NULL_TREE
&& DECL_INITIAL (olddecl) != NULL_TREE)
|| (old_is_prototype && !new_is_prototype
&& !C_DECL_BUILTIN_PROTOTYPE (olddecl)))
DECL_SOURCE_LOCATION (newdecl) = DECL_SOURCE_LOCATION (olddecl);
/* Merge the initialization information. */
if (DECL_INITIAL (newdecl) == NULL_TREE)
DECL_INITIAL (newdecl) = DECL_INITIAL (olddecl);
/* Merge the threadprivate attribute. */
if (VAR_P (olddecl) && C_DECL_THREADPRIVATE_P (olddecl))
C_DECL_THREADPRIVATE_P (newdecl) = 1;
if (CODE_CONTAINS_STRUCT (TREE_CODE (olddecl), TS_DECL_WITH_VIS))
{
/* Copy the assembler name.
Currently, it can only be defined in the prototype. */
COPY_DECL_ASSEMBLER_NAME (olddecl, newdecl);
/* Use visibility of whichever declaration had it specified */
if (DECL_VISIBILITY_SPECIFIED (olddecl))
{
DECL_VISIBILITY (newdecl) = DECL_VISIBILITY (olddecl);
DECL_VISIBILITY_SPECIFIED (newdecl) = 1;
}
if (TREE_CODE (newdecl) == FUNCTION_DECL)
{
DECL_STATIC_CONSTRUCTOR(newdecl) |= DECL_STATIC_CONSTRUCTOR(olddecl);
DECL_STATIC_DESTRUCTOR (newdecl) |= DECL_STATIC_DESTRUCTOR (olddecl);
DECL_NO_LIMIT_STACK (newdecl) |= DECL_NO_LIMIT_STACK (olddecl);
DECL_NO_INSTRUMENT_FUNCTION_ENTRY_EXIT (newdecl)
|= DECL_NO_INSTRUMENT_FUNCTION_ENTRY_EXIT (olddecl);
TREE_THIS_VOLATILE (newdecl) |= TREE_THIS_VOLATILE (olddecl);
DECL_IS_MALLOC (newdecl) |= DECL_IS_MALLOC (olddecl);
DECL_IS_OPERATOR_NEW (newdecl) |= DECL_IS_OPERATOR_NEW (olddecl);
TREE_READONLY (newdecl) |= TREE_READONLY (olddecl);
DECL_PURE_P (newdecl) |= DECL_PURE_P (olddecl);
DECL_IS_NOVOPS (newdecl) |= DECL_IS_NOVOPS (olddecl);
}
/* Merge the storage class information. */
merge_weak (newdecl, olddecl);
/* For functions, static overrides non-static. */
if (TREE_CODE (newdecl) == FUNCTION_DECL)
{
TREE_PUBLIC (newdecl) &= TREE_PUBLIC (olddecl);
/* This is needed because we don't automatically
copy the attributes of NEWDECL into OLDDECL. */
TREE_PUBLIC (olddecl) = TREE_PUBLIC (newdecl);
/* If this clears `static', clear it in the identifier too. */
if (!TREE_PUBLIC (olddecl))
TREE_PUBLIC (DECL_NAME (olddecl)) = 0;
}
}
/* In c99, 'extern' declaration before (or after) 'inline' means this
function is not DECL_EXTERNAL, unless 'gnu_inline' attribute
is present. */
if (TREE_CODE (newdecl) == FUNCTION_DECL
&& !flag_gnu89_inline
&& (DECL_DECLARED_INLINE_P (newdecl)
|| DECL_DECLARED_INLINE_P (olddecl))
&& (!DECL_DECLARED_INLINE_P (newdecl)
|| !DECL_DECLARED_INLINE_P (olddecl)
|| !DECL_EXTERNAL (olddecl))
&& DECL_EXTERNAL (newdecl)
&& !lookup_attribute ("gnu_inline", DECL_ATTRIBUTES (newdecl))
&& !current_function_decl)
DECL_EXTERNAL (newdecl) = 0;
/* An inline definition following a static declaration is not
DECL_EXTERNAL. */
if (new_is_definition
&& (DECL_DECLARED_INLINE_P (newdecl)
|| DECL_DECLARED_INLINE_P (olddecl))
&& !TREE_PUBLIC (olddecl))
DECL_EXTERNAL (newdecl) = 0;
if (DECL_EXTERNAL (newdecl))
{
TREE_STATIC (newdecl) = TREE_STATIC (olddecl);
DECL_EXTERNAL (newdecl) = DECL_EXTERNAL (olddecl);
/* An extern decl does not override previous storage class. */
TREE_PUBLIC (newdecl) = TREE_PUBLIC (olddecl);
if (!DECL_EXTERNAL (newdecl))
{
DECL_CONTEXT (newdecl) = DECL_CONTEXT (olddecl);
DECL_COMMON (newdecl) = DECL_COMMON (olddecl);
}
}
else
{
TREE_STATIC (olddecl) = TREE_STATIC (newdecl);
TREE_PUBLIC (olddecl) = TREE_PUBLIC (newdecl);
}
if (TREE_CODE (newdecl) == FUNCTION_DECL)
{
/* If we're redefining a function previously defined as extern
inline, make sure we emit debug info for the inline before we
throw it away, in case it was inlined into a function that
hasn't been written out yet. */
if (new_is_definition && DECL_INITIAL (olddecl))
/* The new defn must not be inline. */
DECL_UNINLINABLE (newdecl) = 1;
else
{
/* If either decl says `inline', this fn is inline, unless
its definition was passed already. */
if (DECL_DECLARED_INLINE_P (newdecl)
|| DECL_DECLARED_INLINE_P (olddecl))
DECL_DECLARED_INLINE_P (newdecl) = 1;
DECL_UNINLINABLE (newdecl) = DECL_UNINLINABLE (olddecl)
= (DECL_UNINLINABLE (newdecl) || DECL_UNINLINABLE (olddecl));
DECL_DISREGARD_INLINE_LIMITS (newdecl)
= DECL_DISREGARD_INLINE_LIMITS (olddecl)
= (DECL_DISREGARD_INLINE_LIMITS (newdecl)
|| DECL_DISREGARD_INLINE_LIMITS (olddecl));
}
if (DECL_BUILT_IN (olddecl))
{
/* If redeclaring a builtin function, it stays built in.
But it gets tagged as having been declared. */
DECL_BUILT_IN_CLASS (newdecl) = DECL_BUILT_IN_CLASS (olddecl);
DECL_FUNCTION_CODE (newdecl) = DECL_FUNCTION_CODE (olddecl);
C_DECL_DECLARED_BUILTIN (newdecl) = 1;
if (new_is_prototype)
{
C_DECL_BUILTIN_PROTOTYPE (newdecl) = 0;
if (DECL_BUILT_IN_CLASS (newdecl) == BUILT_IN_NORMAL)
{
enum built_in_function fncode = DECL_FUNCTION_CODE (newdecl);
switch (fncode)
{
/* If a compatible prototype of these builtin functions
is seen, assume the runtime implements it with the
expected semantics. */
case BUILT_IN_STPCPY:
if (builtin_decl_explicit_p (fncode))
set_builtin_decl_implicit_p (fncode, true);
break;
default:
if (builtin_decl_explicit_p (fncode))
set_builtin_decl_declared_p (fncode, true);
break;
}
copy_attributes_to_builtin (newdecl);
}
}
else
C_DECL_BUILTIN_PROTOTYPE (newdecl)
= C_DECL_BUILTIN_PROTOTYPE (olddecl);
}
/* Preserve function specific target and optimization options */
if (DECL_FUNCTION_SPECIFIC_TARGET (olddecl)
&& !DECL_FUNCTION_SPECIFIC_TARGET (newdecl))
DECL_FUNCTION_SPECIFIC_TARGET (newdecl)
= DECL_FUNCTION_SPECIFIC_TARGET (olddecl);
if (DECL_FUNCTION_SPECIFIC_OPTIMIZATION (olddecl)
&& !DECL_FUNCTION_SPECIFIC_OPTIMIZATION (newdecl))
DECL_FUNCTION_SPECIFIC_OPTIMIZATION (newdecl)
= DECL_FUNCTION_SPECIFIC_OPTIMIZATION (olddecl);
/* Also preserve various other info from the definition. */
if (!new_is_definition)
{
tree t;
DECL_RESULT (newdecl) = DECL_RESULT (olddecl);
DECL_INITIAL (newdecl) = DECL_INITIAL (olddecl);
DECL_STRUCT_FUNCTION (newdecl) = DECL_STRUCT_FUNCTION (olddecl);
DECL_SAVED_TREE (newdecl) = DECL_SAVED_TREE (olddecl);
DECL_ARGUMENTS (newdecl) = copy_list (DECL_ARGUMENTS (olddecl));
for (t = DECL_ARGUMENTS (newdecl); t ; t = DECL_CHAIN (t))
DECL_CONTEXT (t) = newdecl;
/* See if we've got a function to instantiate from. */
if (DECL_SAVED_TREE (olddecl))
DECL_ABSTRACT_ORIGIN (newdecl)
= DECL_ABSTRACT_ORIGIN (olddecl);
}
}
/* Merge the USED information. */
if (TREE_USED (olddecl))
TREE_USED (newdecl) = 1;
else if (TREE_USED (newdecl))
TREE_USED (olddecl) = 1;
if (VAR_P (olddecl) || TREE_CODE (olddecl) == PARM_DECL)
DECL_READ_P (newdecl) |= DECL_READ_P (olddecl);
if (DECL_PRESERVE_P (olddecl))
DECL_PRESERVE_P (newdecl) = 1;
else if (DECL_PRESERVE_P (newdecl))
DECL_PRESERVE_P (olddecl) = 1;
/* Merge DECL_COMMON */
if (VAR_P (olddecl) && VAR_P (newdecl)
&& !lookup_attribute ("common", DECL_ATTRIBUTES (newdecl))
&& !lookup_attribute ("nocommon", DECL_ATTRIBUTES (newdecl)))
DECL_COMMON (newdecl) = DECL_COMMON (newdecl) && DECL_COMMON (olddecl);
/* Copy most of the decl-specific fields of NEWDECL into OLDDECL.
But preserve OLDDECL's DECL_UID, DECL_CONTEXT and
DECL_ARGUMENTS (if appropriate). */
{
unsigned olddecl_uid = DECL_UID (olddecl);
tree olddecl_context = DECL_CONTEXT (olddecl);
tree olddecl_arguments = NULL;
if (TREE_CODE (olddecl) == FUNCTION_DECL)
olddecl_arguments = DECL_ARGUMENTS (olddecl);
memcpy ((char *) olddecl + sizeof (struct tree_common),
(char *) newdecl + sizeof (struct tree_common),
sizeof (struct tree_decl_common) - sizeof (struct tree_common));
DECL_USER_ALIGN (olddecl) = DECL_USER_ALIGN (newdecl);
switch (TREE_CODE (olddecl))
{
case FUNCTION_DECL:
case VAR_DECL:
{
struct symtab_node *snode = olddecl->decl_with_vis.symtab_node;
memcpy ((char *) olddecl + sizeof (struct tree_decl_common),
(char *) newdecl + sizeof (struct tree_decl_common),
tree_code_size (TREE_CODE (olddecl)) - sizeof (struct tree_decl_common));
olddecl->decl_with_vis.symtab_node = snode;
if ((DECL_EXTERNAL (olddecl)
|| TREE_PUBLIC (olddecl)
|| TREE_STATIC (olddecl))
&& DECL_SECTION_NAME (newdecl) != NULL)
set_decl_section_name (olddecl, DECL_SECTION_NAME (newdecl));
/* This isn't quite correct for something like
int __thread x attribute ((tls_model ("local-exec")));
extern int __thread x;
as we'll lose the "local-exec" model. */
if (VAR_P (olddecl) && DECL_THREAD_LOCAL_P (newdecl))
set_decl_tls_model (olddecl, DECL_TLS_MODEL (newdecl));
break;
}
case FIELD_DECL:
case PARM_DECL:
case LABEL_DECL:
case RESULT_DECL:
case CONST_DECL:
case TYPE_DECL:
memcpy ((char *) olddecl + sizeof (struct tree_decl_common),
(char *) newdecl + sizeof (struct tree_decl_common),
tree_code_size (TREE_CODE (olddecl)) - sizeof (struct tree_decl_common));
break;
default:
memcpy ((char *) olddecl + sizeof (struct tree_decl_common),
(char *) newdecl + sizeof (struct tree_decl_common),
sizeof (struct tree_decl_non_common) - sizeof (struct tree_decl_common));
}
DECL_UID (olddecl) = olddecl_uid;
DECL_CONTEXT (olddecl) = olddecl_context;
if (TREE_CODE (olddecl) == FUNCTION_DECL)
DECL_ARGUMENTS (olddecl) = olddecl_arguments;
}
/* If OLDDECL had its DECL_RTL instantiated, re-invoke make_decl_rtl
so that encode_section_info has a chance to look at the new decl
flags and attributes. */
if (DECL_RTL_SET_P (olddecl)
&& (TREE_CODE (olddecl) == FUNCTION_DECL
|| (VAR_P (olddecl) && TREE_STATIC (olddecl))))
make_decl_rtl (olddecl);
}
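/* Illustrative example (editor's sketch) of the type merging done in
   merge_decls via composite_type:

       extern int a[];      first declaration, incomplete array type
       int a[10];           definition

   After the second declaration both decls carry the composite type
   int[10], folded back into the old decl so later references see the
   completed array. */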
/* Handle when a new declaration NEWDECL has the same name as an old
one OLDDECL in the same binding contour. Prints an error message
if appropriate.
If safely possible, alter OLDDECL to look like NEWDECL, and return
true. Otherwise, return false. */
static bool
duplicate_decls (tree newdecl, tree olddecl)
{
tree newtype = NULL, oldtype = NULL;
if (!diagnose_mismatched_decls (newdecl, olddecl, &newtype, &oldtype))
{
/* Avoid `unused variable' and other warnings for OLDDECL. */
TREE_NO_WARNING (olddecl) = 1;
return false;
}
merge_decls (newdecl, olddecl, newtype, oldtype);
/* The NEWDECL will no longer be needed.
Before releasing the node, be sure to remove the function from the
symbol table, where it might have been inserted to record its comdat
group.  Be sure, however, not to free DECL_STRUCT_FUNCTION, because
that structure is shared between NEWDECL and OLDDECL. */
if (TREE_CODE (newdecl) == FUNCTION_DECL)
DECL_STRUCT_FUNCTION (newdecl) = NULL;
if (VAR_OR_FUNCTION_DECL_P (newdecl))
{
struct symtab_node *snode = symtab_node::get (newdecl);
if (snode)
snode->remove ();
}
ggc_free (newdecl);
return true;
}
/* Check whether decl-node NEW_DECL shadows an existing declaration. */
static void
warn_if_shadowing (tree new_decl)
{
struct c_binding *b;
/* Shadow warnings wanted? */
if (!(warn_shadow
|| warn_shadow_local
|| warn_shadow_compatible_local)
/* No shadow warnings for internally generated vars. */
|| DECL_IS_BUILTIN (new_decl)
/* No shadow warnings for vars made for inlining. */
|| DECL_FROM_INLINE (new_decl))
return;
/* Is anything being shadowed? Invisible decls do not count. */
for (b = I_SYMBOL_BINDING (DECL_NAME (new_decl)); b; b = b->shadowed)
if (b->decl && b->decl != new_decl && !b->invisible
&& (b->decl == error_mark_node
|| diagnostic_report_warnings_p (global_dc,
DECL_SOURCE_LOCATION (b->decl))))
{
tree old_decl = b->decl;
bool warned = false;
if (old_decl == error_mark_node)
{
warning (OPT_Wshadow, "declaration of %q+D shadows previous "
"non-variable", new_decl);
break;
}
else if (TREE_CODE (old_decl) == PARM_DECL)
{
enum opt_code warning_code;
/* If '-Wshadow=compatible-local' is specified without other
-Wshadow= flags, we will warn only when the types of the
shadowing variable (i.e. new_decl) and the shadowed variable
(old_decl) are compatible. */
if (warn_shadow)
warning_code = OPT_Wshadow;
else if (comptypes (TREE_TYPE (old_decl), TREE_TYPE (new_decl)))
warning_code = OPT_Wshadow_compatible_local;
else
warning_code = OPT_Wshadow_local;
warned = warning_at (DECL_SOURCE_LOCATION (new_decl), warning_code,
"declaration of %qD shadows a parameter",
new_decl);
}
else if (DECL_FILE_SCOPE_P (old_decl))
{
/* Do not warn if a variable shadows a function, unless
the variable is a function or a pointer-to-function. */
if (TREE_CODE (old_decl) == FUNCTION_DECL
&& TREE_CODE (new_decl) != FUNCTION_DECL
&& !FUNCTION_POINTER_TYPE_P (TREE_TYPE (new_decl)))
continue;
warned = warning_at (DECL_SOURCE_LOCATION (new_decl), OPT_Wshadow,
"declaration of %qD shadows a global "
"declaration",
new_decl);
}
else if (TREE_CODE (old_decl) == FUNCTION_DECL
&& DECL_BUILT_IN (old_decl))
{
warning (OPT_Wshadow, "declaration of %q+D shadows "
"a built-in function", new_decl);
break;
}
else
{
enum opt_code warning_code;
/* If '-Wshadow=compatible-local' is specified without other
-Wshadow= flags, we will warn only when the types of the
shadowing variable (i.e. new_decl) and the shadowed variable
(old_decl) are compatible. */
if (warn_shadow)
warning_code = OPT_Wshadow;
else if (comptypes (TREE_TYPE (old_decl), TREE_TYPE (new_decl)))
warning_code = OPT_Wshadow_compatible_local;
else
warning_code = OPT_Wshadow_local;
warned = warning_at (DECL_SOURCE_LOCATION (new_decl), warning_code,
"declaration of %qD shadows a previous local",
new_decl);
}
if (warned)
inform (DECL_SOURCE_LOCATION (old_decl),
"shadowed declaration is here");
break;
}
}
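/* Illustrative example (editor's sketch) of the -Wshadow granularity
   handled above:

       void f (int i)
       {
         {
           int i = 0;   shadows the parameter with a compatible type:
                        warned by -Wshadow, -Wshadow=local, and
                        -Wshadow=compatible-local
         }
       }

   Had the inner "i" been declared with an incompatible type (say,
   double), only -Wshadow and -Wshadow=local would warn, not
   -Wshadow=compatible-local. */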
/* Record a decl-node X as belonging to the current lexical scope.
Check for errors (such as an incompatible declaration for the same
name already seen in the same scope).
Returns either X or an old decl for the same name.
If an old decl is returned, it may have been smashed
to agree with what X says. */
tree
pushdecl (tree x)
{
tree name = DECL_NAME (x);
struct c_scope *scope = current_scope;
struct c_binding *b;
bool nested = false;
location_t locus = DECL_SOURCE_LOCATION (x);
/* Must set DECL_CONTEXT for everything not at file scope or
DECL_FILE_SCOPE_P won't work. Local externs don't count
unless they have initializers (which generate code). */
if (current_function_decl
&& (!VAR_OR_FUNCTION_DECL_P (x)
|| DECL_INITIAL (x) || !DECL_EXTERNAL (x)))
DECL_CONTEXT (x) = current_function_decl;
/* Anonymous decls are just inserted in the scope. */
if (!name)
{
bind (name, x, scope, /*invisible=*/false, /*nested=*/false,
locus);
return x;
}
/* First, see if there is another declaration with the same name in
the current scope. If there is, duplicate_decls may do all the
work for us. If duplicate_decls returns false, that indicates
two incompatible decls in the same scope; we are to silently
replace the old one (duplicate_decls has issued all appropriate
diagnostics). In particular, we should not consider possible
duplicates in the external scope, or shadowing. */
b = I_SYMBOL_BINDING (name);
if (b && B_IN_SCOPE (b, scope))
{
struct c_binding *b_ext, *b_use;
tree type = TREE_TYPE (x);
tree visdecl = b->decl;
tree vistype = TREE_TYPE (visdecl);
if (TREE_CODE (TREE_TYPE (x)) == ARRAY_TYPE
&& COMPLETE_TYPE_P (TREE_TYPE (x)))
b->inner_comp = false;
b_use = b;
b_ext = b;
/* If this is an external linkage declaration, we should check
for compatibility with the type in the external scope before
setting the type at this scope based on the visible
information only. */
if (TREE_PUBLIC (x) && TREE_PUBLIC (visdecl))
{
while (b_ext && !B_IN_EXTERNAL_SCOPE (b_ext))
b_ext = b_ext->shadowed;
if (b_ext)
{
b_use = b_ext;
if (b_use->u.type)
TREE_TYPE (b_use->decl) = b_use->u.type;
}
}
if (duplicate_decls (x, b_use->decl))
{
if (b_use != b)
{
/* Save the updated type in the external scope and
restore the proper type for this scope. */
tree thistype;
if (comptypes (vistype, type))
thistype = composite_type (vistype, type);
else
thistype = TREE_TYPE (b_use->decl);
b_use->u.type = TREE_TYPE (b_use->decl);
if (TREE_CODE (b_use->decl) == FUNCTION_DECL
&& DECL_BUILT_IN (b_use->decl))
thistype
= build_type_attribute_variant (thistype,
TYPE_ATTRIBUTES
(b_use->u.type));
TREE_TYPE (b_use->decl) = thistype;
}
return b_use->decl;
}
else
goto skip_external_and_shadow_checks;
}
/* All declarations with external linkage, and all external
references, go in the external scope, no matter what scope is
current. However, the binding in that scope is ignored for
purposes of normal name lookup. A separate binding structure is
created in the requested scope; this governs the normal
visibility of the symbol.
The binding in the externals scope is used exclusively for
detecting duplicate declarations of the same object, no matter
what scope they are in; this is what we do here. (C99 6.2.7p2:
All declarations that refer to the same object or function shall
have compatible type; otherwise, the behavior is undefined.) */
if (DECL_EXTERNAL (x) || scope == file_scope)
{
tree type = TREE_TYPE (x);
tree vistype = NULL_TREE;
tree visdecl = NULL_TREE;
bool type_saved = false;
if (b && !B_IN_EXTERNAL_SCOPE (b)
&& VAR_OR_FUNCTION_DECL_P (b->decl)
&& DECL_FILE_SCOPE_P (b->decl))
{
visdecl = b->decl;
vistype = TREE_TYPE (visdecl);
}
if (scope != file_scope
&& !DECL_IN_SYSTEM_HEADER (x))
warning_at (locus, OPT_Wnested_externs,
"nested extern declaration of %qD", x);
while (b && !B_IN_EXTERNAL_SCOPE (b))
{
/* If this decl might be modified, save its type. This is
done here rather than when the decl is first bound
because the type may change after first binding, through
being completed or through attributes being added. If we
encounter multiple such decls, only the first should have
its type saved; the others will already have had their
proper types saved and the types will not have changed as
their scopes will not have been re-entered. */
if (DECL_P (b->decl) && DECL_FILE_SCOPE_P (b->decl) && !type_saved)
{
b->u.type = TREE_TYPE (b->decl);
type_saved = true;
}
if (B_IN_FILE_SCOPE (b)
&& VAR_P (b->decl)
&& TREE_STATIC (b->decl)
&& TREE_CODE (TREE_TYPE (b->decl)) == ARRAY_TYPE
&& !TYPE_DOMAIN (TREE_TYPE (b->decl))
&& TREE_CODE (type) == ARRAY_TYPE
&& TYPE_DOMAIN (type)
&& TYPE_MAX_VALUE (TYPE_DOMAIN (type))
&& !integer_zerop (TYPE_MAX_VALUE (TYPE_DOMAIN (type))))
{
/* Array type completed in inner scope, which should be
diagnosed if the completion does not have size 1 and
it does not get completed in the file scope. */
b->inner_comp = true;
}
b = b->shadowed;
}
/* If a matching external declaration has been found, set its
type to the composite of all the types of that declaration.
After the consistency checks, it will be reset to the
composite of the visible types only. */
if (b && (TREE_PUBLIC (x) || same_translation_unit_p (x, b->decl))
&& b->u.type)
TREE_TYPE (b->decl) = b->u.type;
/* The point of the same_translation_unit_p check here is that
we want to detect a duplicate decl for a construct like
foo() { extern bar(); } ... static bar(); but not if
they are in different translation units.  In any case,
the static does not go in the externals scope. */
if (b
&& (TREE_PUBLIC (x) || same_translation_unit_p (x, b->decl))
&& duplicate_decls (x, b->decl))
{
tree thistype;
if (vistype)
{
if (comptypes (vistype, type))
thistype = composite_type (vistype, type);
else
thistype = TREE_TYPE (b->decl);
}
else
thistype = type;
b->u.type = TREE_TYPE (b->decl);
if (TREE_CODE (b->decl) == FUNCTION_DECL && DECL_BUILT_IN (b->decl))
thistype
= build_type_attribute_variant (thistype,
TYPE_ATTRIBUTES (b->u.type));
TREE_TYPE (b->decl) = thistype;
bind (name, b->decl, scope, /*invisible=*/false, /*nested=*/true,
locus);
return b->decl;
}
else if (TREE_PUBLIC (x))
{
if (visdecl && !b && duplicate_decls (x, visdecl))
{
/* An external declaration at block scope referring to a
visible entity with internal linkage. The composite
type will already be correct for this scope, so we
just need to fall through to make the declaration in
this scope. */
nested = true;
x = visdecl;
}
else
{
bind (name, x, external_scope, /*invisible=*/true,
/*nested=*/false, locus);
nested = true;
}
}
}
if (TREE_CODE (x) != PARM_DECL)
warn_if_shadowing (x);
skip_external_and_shadow_checks:
if (TREE_CODE (x) == TYPE_DECL)
{
/* So this is a typedef, set its underlying type. */
set_underlying_type (x);
/* If X is a typedef defined in the current function, record it
for the purpose of implementing the -Wunused-local-typedefs
warning. */
record_locally_defined_typedef (x);
}
bind (name, x, scope, /*invisible=*/false, nested, locus);
/* If x's type is incomplete because it's based on a
structure or union which has not yet been fully declared,
attach it to that structure or union type, so we can go
back and complete the variable declaration later, if the
structure or union gets fully declared.
If the input is erroneous, we can have error_mark in the type
slot (e.g. "f(void a, ...)") - that doesn't count as an
incomplete type. */
if (TREE_TYPE (x) != error_mark_node
&& !COMPLETE_TYPE_P (TREE_TYPE (x)))
{
tree element = TREE_TYPE (x);
while (TREE_CODE (element) == ARRAY_TYPE)
element = TREE_TYPE (element);
element = TYPE_MAIN_VARIANT (element);
if (RECORD_OR_UNION_TYPE_P (element)
&& (TREE_CODE (x) != TYPE_DECL
|| TREE_CODE (TREE_TYPE (x)) == ARRAY_TYPE)
&& !COMPLETE_TYPE_P (element))
C_TYPE_INCOMPLETE_VARS (element)
= tree_cons (NULL_TREE, x, C_TYPE_INCOMPLETE_VARS (element));
}
return x;
}
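/* Illustrative example (editor's sketch) of the inner_comp flag set
   above:

       static int a[];                       incomplete at file scope
       void f (void) { extern int a[2]; }    completed in an inner scope

   If "a" is never completed at file scope it is implicitly given one
   element, which conflicts with the inner completion to two elements;
   inner_comp records the situation so it can be diagnosed later. */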
/* Issue a warning about implicit function declaration. ID is the function
identifier, OLDDECL is a declaration of the function in a different scope,
or NULL_TREE. */
static void
implicit_decl_warning (location_t loc, tree id, tree olddecl)
{
if (!warn_implicit_function_declaration)
return;
bool warned;
name_hint hint;
if (!olddecl)
hint = lookup_name_fuzzy (id, FUZZY_LOOKUP_FUNCTION_NAME, loc);
if (flag_isoc99)
{
if (hint)
{
gcc_rich_location richloc (loc);
richloc.add_fixit_replace (hint.suggestion ());
warned = pedwarn (&richloc, OPT_Wimplicit_function_declaration,
"implicit declaration of function %qE;"
" did you mean %qs?",
id, hint.suggestion ());
}
else
warned = pedwarn (loc, OPT_Wimplicit_function_declaration,
"implicit declaration of function %qE", id);
}
else if (hint)
{
gcc_rich_location richloc (loc);
richloc.add_fixit_replace (hint.suggestion ());
warned = warning_at
(&richloc, OPT_Wimplicit_function_declaration,
G_("implicit declaration of function %qE; did you mean %qs?"),
id, hint.suggestion ());
}
else
warned = warning_at (loc, OPT_Wimplicit_function_declaration,
G_("implicit declaration of function %qE"), id);
if (olddecl && warned)
locate_old_decl (olddecl);
if (!warned)
hint.suppress ();
}
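/* Illustrative example (editor's sketch): with -std=c99, a call such
   as

       int main (void) { return prinft ("hi"); }

   pedwarns "implicit declaration of function 'prinft'" and, when the
   fuzzy lookup finds a near miss, adds "did you mean 'printf'?"
   together with a fix-it hint replacing the spelling. */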
/* Map the built-in function code FCODE to the standard header that
declares the corresponding library function, or NULL if there is none. */
static const char *
header_for_builtin_fn (enum built_in_function fcode)
{
switch (fcode)
{
CASE_FLT_FN (BUILT_IN_ACOS):
CASE_FLT_FN (BUILT_IN_ACOSH):
CASE_FLT_FN (BUILT_IN_ASIN):
CASE_FLT_FN (BUILT_IN_ASINH):
CASE_FLT_FN (BUILT_IN_ATAN):
CASE_FLT_FN (BUILT_IN_ATANH):
CASE_FLT_FN (BUILT_IN_ATAN2):
CASE_FLT_FN (BUILT_IN_CBRT):
CASE_FLT_FN (BUILT_IN_CEIL):
CASE_FLT_FN_FLOATN_NX (BUILT_IN_CEIL):
CASE_FLT_FN (BUILT_IN_COPYSIGN):
CASE_FLT_FN_FLOATN_NX (BUILT_IN_COPYSIGN):
CASE_FLT_FN (BUILT_IN_COS):
CASE_FLT_FN (BUILT_IN_COSH):
CASE_FLT_FN (BUILT_IN_ERF):
CASE_FLT_FN (BUILT_IN_ERFC):
CASE_FLT_FN (BUILT_IN_EXP):
CASE_FLT_FN (BUILT_IN_EXP2):
CASE_FLT_FN (BUILT_IN_EXPM1):
CASE_FLT_FN (BUILT_IN_FABS):
CASE_FLT_FN_FLOATN_NX (BUILT_IN_FABS):
CASE_FLT_FN (BUILT_IN_FDIM):
CASE_FLT_FN (BUILT_IN_FLOOR):
CASE_FLT_FN_FLOATN_NX (BUILT_IN_FLOOR):
CASE_FLT_FN (BUILT_IN_FMA):
CASE_FLT_FN_FLOATN_NX (BUILT_IN_FMA):
CASE_FLT_FN (BUILT_IN_FMAX):
CASE_FLT_FN_FLOATN_NX (BUILT_IN_FMAX):
CASE_FLT_FN (BUILT_IN_FMIN):
CASE_FLT_FN_FLOATN_NX (BUILT_IN_FMIN):
CASE_FLT_FN (BUILT_IN_FMOD):
CASE_FLT_FN (BUILT_IN_FREXP):
CASE_FLT_FN (BUILT_IN_HYPOT):
CASE_FLT_FN (BUILT_IN_ILOGB):
CASE_FLT_FN (BUILT_IN_LDEXP):
CASE_FLT_FN (BUILT_IN_LGAMMA):
CASE_FLT_FN (BUILT_IN_LLRINT):
CASE_FLT_FN (BUILT_IN_LLROUND):
CASE_FLT_FN (BUILT_IN_LOG):
CASE_FLT_FN (BUILT_IN_LOG10):
CASE_FLT_FN (BUILT_IN_LOG1P):
CASE_FLT_FN (BUILT_IN_LOG2):
CASE_FLT_FN (BUILT_IN_LOGB):
CASE_FLT_FN (BUILT_IN_LRINT):
CASE_FLT_FN (BUILT_IN_LROUND):
CASE_FLT_FN (BUILT_IN_MODF):
CASE_FLT_FN (BUILT_IN_NAN):
CASE_FLT_FN (BUILT_IN_NEARBYINT):
CASE_FLT_FN_FLOATN_NX (BUILT_IN_NEARBYINT):
CASE_FLT_FN (BUILT_IN_NEXTAFTER):
CASE_FLT_FN (BUILT_IN_NEXTTOWARD):
CASE_FLT_FN (BUILT_IN_POW):
CASE_FLT_FN (BUILT_IN_REMAINDER):
CASE_FLT_FN (BUILT_IN_REMQUO):
CASE_FLT_FN (BUILT_IN_RINT):
CASE_FLT_FN_FLOATN_NX (BUILT_IN_RINT):
CASE_FLT_FN (BUILT_IN_ROUND):
CASE_FLT_FN_FLOATN_NX (BUILT_IN_ROUND):
CASE_FLT_FN (BUILT_IN_SCALBLN):
CASE_FLT_FN (BUILT_IN_SCALBN):
CASE_FLT_FN (BUILT_IN_SIN):
CASE_FLT_FN (BUILT_IN_SINH):
CASE_FLT_FN (BUILT_IN_SINCOS):
CASE_FLT_FN (BUILT_IN_SQRT):
CASE_FLT_FN_FLOATN_NX (BUILT_IN_SQRT):
CASE_FLT_FN (BUILT_IN_TAN):
CASE_FLT_FN (BUILT_IN_TANH):
CASE_FLT_FN (BUILT_IN_TGAMMA):
CASE_FLT_FN (BUILT_IN_TRUNC):
CASE_FLT_FN_FLOATN_NX (BUILT_IN_TRUNC):
case BUILT_IN_ISINF:
case BUILT_IN_ISNAN:
return "<math.h>";
CASE_FLT_FN (BUILT_IN_CABS):
CASE_FLT_FN (BUILT_IN_CACOS):
CASE_FLT_FN (BUILT_IN_CACOSH):
CASE_FLT_FN (BUILT_IN_CARG):
CASE_FLT_FN (BUILT_IN_CASIN):
CASE_FLT_FN (BUILT_IN_CASINH):
CASE_FLT_FN (BUILT_IN_CATAN):
CASE_FLT_FN (BUILT_IN_CATANH):
CASE_FLT_FN (BUILT_IN_CCOS):
CASE_FLT_FN (BUILT_IN_CCOSH):
CASE_FLT_FN (BUILT_IN_CEXP):
CASE_FLT_FN (BUILT_IN_CIMAG):
CASE_FLT_FN (BUILT_IN_CLOG):
CASE_FLT_FN (BUILT_IN_CONJ):
CASE_FLT_FN (BUILT_IN_CPOW):
CASE_FLT_FN (BUILT_IN_CPROJ):
CASE_FLT_FN (BUILT_IN_CREAL):
CASE_FLT_FN (BUILT_IN_CSIN):
CASE_FLT_FN (BUILT_IN_CSINH):
CASE_FLT_FN (BUILT_IN_CSQRT):
CASE_FLT_FN (BUILT_IN_CTAN):
CASE_FLT_FN (BUILT_IN_CTANH):
return "<complex.h>";
case BUILT_IN_MEMCHR:
case BUILT_IN_MEMCMP:
case BUILT_IN_MEMCPY:
case BUILT_IN_MEMMOVE:
case BUILT_IN_MEMSET:
case BUILT_IN_STRCAT:
case BUILT_IN_STRCHR:
case BUILT_IN_STRCMP:
case BUILT_IN_STRCPY:
case BUILT_IN_STRCSPN:
case BUILT_IN_STRLEN:
case BUILT_IN_STRNCAT:
case BUILT_IN_STRNCMP:
case BUILT_IN_STRNCPY:
case BUILT_IN_STRPBRK:
case BUILT_IN_STRRCHR:
case BUILT_IN_STRSPN:
case BUILT_IN_STRSTR:
return "<string.h>";
case BUILT_IN_FPRINTF:
case BUILT_IN_PUTC:
case BUILT_IN_FPUTC:
case BUILT_IN_FPUTS:
case BUILT_IN_FSCANF:
case BUILT_IN_FWRITE:
case BUILT_IN_PRINTF:
case BUILT_IN_PUTCHAR:
case BUILT_IN_PUTS:
case BUILT_IN_SCANF:
case BUILT_IN_SNPRINTF:
case BUILT_IN_SPRINTF:
case BUILT_IN_SSCANF:
case BUILT_IN_VFPRINTF:
case BUILT_IN_VFSCANF:
case BUILT_IN_VPRINTF:
case BUILT_IN_VSCANF:
case BUILT_IN_VSNPRINTF:
case BUILT_IN_VSPRINTF:
case BUILT_IN_VSSCANF:
return "<stdio.h>";
case BUILT_IN_ISALNUM:
case BUILT_IN_ISALPHA:
case BUILT_IN_ISBLANK:
case BUILT_IN_ISCNTRL:
case BUILT_IN_ISDIGIT:
case BUILT_IN_ISGRAPH:
case BUILT_IN_ISLOWER:
case BUILT_IN_ISPRINT:
case BUILT_IN_ISPUNCT:
case BUILT_IN_ISSPACE:
case BUILT_IN_ISUPPER:
case BUILT_IN_ISXDIGIT:
case BUILT_IN_TOLOWER:
case BUILT_IN_TOUPPER:
return "<ctype.h>";
case BUILT_IN_ISWALNUM:
case BUILT_IN_ISWALPHA:
case BUILT_IN_ISWBLANK:
case BUILT_IN_ISWCNTRL:
case BUILT_IN_ISWDIGIT:
case BUILT_IN_ISWGRAPH:
case BUILT_IN_ISWLOWER:
case BUILT_IN_ISWPRINT:
case BUILT_IN_ISWPUNCT:
case BUILT_IN_ISWSPACE:
case BUILT_IN_ISWUPPER:
case BUILT_IN_ISWXDIGIT:
case BUILT_IN_TOWLOWER:
case BUILT_IN_TOWUPPER:
return "<wctype.h>";
case BUILT_IN_ABORT:
case BUILT_IN_ABS:
case BUILT_IN_CALLOC:
case BUILT_IN_EXIT:
case BUILT_IN_FREE:
case BUILT_IN_LABS:
case BUILT_IN_LLABS:
case BUILT_IN_MALLOC:
case BUILT_IN_REALLOC:
case BUILT_IN__EXIT2:
case BUILT_IN_ALIGNED_ALLOC:
return "<stdlib.h>";
case BUILT_IN_IMAXABS:
return "<inttypes.h>";
case BUILT_IN_STRFTIME:
return "<time.h>";
default:
return NULL;
}
}
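/* For example, header_for_builtin_fn (BUILT_IN_MEMCPY) returns
   "<string.h>", while a code with no associated standard header,
   such as BUILT_IN_ALLOCA, yields NULL. */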
/* Generate an implicit declaration for identifier FUNCTIONID at LOC as a
function of type int (). */
tree
implicitly_declare (location_t loc, tree functionid)
{
struct c_binding *b;
tree decl = NULL_TREE;
tree asmspec_tree;
for (b = I_SYMBOL_BINDING (functionid); b; b = b->shadowed)
{
if (B_IN_SCOPE (b, external_scope))
{
decl = b->decl;
break;
}
}
if (decl)
{
if (TREE_CODE (decl) != FUNCTION_DECL)
return decl;
/* FIXME: Objective-C has weird not-really-builtin functions
which are supposed to be visible automatically. They wind up
in the external scope because they're pushed before the file
scope gets created. Catch this here and rebind them into the
file scope. */
if (!DECL_BUILT_IN (decl) && DECL_IS_BUILTIN (decl))
{
bind (functionid, decl, file_scope,
/*invisible=*/false, /*nested=*/true,
DECL_SOURCE_LOCATION (decl));
return decl;
}
else
{
tree newtype = default_function_type;
if (b->u.type)
TREE_TYPE (decl) = b->u.type;
/* Implicit declaration of a function already declared
(somehow) in a different scope, or as a built-in.
If this is the first time this has happened, warn;
then recycle the old declaration but with the new type. */
if (!C_DECL_IMPLICIT (decl))
{
implicit_decl_warning (loc, functionid, decl);
C_DECL_IMPLICIT (decl) = 1;
}
if (DECL_BUILT_IN (decl))
{
newtype = build_type_attribute_variant (newtype,
TYPE_ATTRIBUTES
(TREE_TYPE (decl)));
if (!comptypes (newtype, TREE_TYPE (decl)))
{
bool warned = warning_at (loc, 0, "incompatible implicit "
"declaration of built-in "
"function %qD", decl);
/* See if we can hint which header to include. */
const char *header
= header_for_builtin_fn (DECL_FUNCTION_CODE (decl));
if (header != NULL && warned)
{
rich_location richloc (line_table, loc);
maybe_add_include_fixit (&richloc, header);
inform (&richloc,
"include %qs or provide a declaration of %qD",
header, decl);
}
newtype = TREE_TYPE (decl);
}
}
else
{
if (!comptypes (newtype, TREE_TYPE (decl)))
{
error_at (loc, "incompatible implicit declaration of "
"function %qD", decl);
locate_old_decl (decl);
}
}
b->u.type = TREE_TYPE (decl);
TREE_TYPE (decl) = newtype;
bind (functionid, decl, current_scope,
/*invisible=*/false, /*nested=*/true,
DECL_SOURCE_LOCATION (decl));
return decl;
}
}
/* Not seen before. */
decl = build_decl (loc, FUNCTION_DECL, functionid, default_function_type);
DECL_EXTERNAL (decl) = 1;
TREE_PUBLIC (decl) = 1;
C_DECL_IMPLICIT (decl) = 1;
implicit_decl_warning (loc, functionid, 0);
asmspec_tree = maybe_apply_renaming_pragma (decl, /*asmname=*/NULL);
if (asmspec_tree)
set_user_assembler_name (decl, TREE_STRING_POINTER (asmspec_tree));
/* C89 says implicit declarations are in the innermost block.
So we record the decl in the standard fashion. */
decl = pushdecl (decl);
/* No need to call objc_check_decl here - it's a function type. */
rest_of_decl_compilation (decl, 0, 0);
/* Write a record describing this implicit function declaration
to the prototypes file (if requested). */
gen_aux_info_record (decl, 0, 1, 0);
/* Possibly apply some default attributes to this implicit declaration. */
decl_attributes (&decl, NULL_TREE, 0);
return decl;
}
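/* Illustrative example, not part of the original source: compiled
   as -std=gnu89, a translation unit such as

       int main (void)
       {
         return strlen ("abc");
       }

   with no #include of <string.h> reaches implicitly_declare for
   "strlen". Because strlen is a built-in whose real type conflicts
   with the implicit int () type, the "incompatible implicit
   declaration of built-in function" warning fires, and
   header_for_builtin_fn supplies the fix-it note suggesting that
   <string.h> be included. */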
/* Issue an error message for a reference to an undeclared variable
ID, including a reference to a builtin outside of function-call
context. Establish a binding of the identifier to error_mark_node
in an appropriate scope, which will suppress further errors for the
same identifier. The error message should be given location LOC. */
void
undeclared_variable (location_t loc, tree id)
{
static bool already = false;
struct c_scope *scope;
if (current_function_decl == NULL_TREE)
{
name_hint guessed_id = lookup_name_fuzzy (id, FUZZY_LOOKUP_NAME, loc);
if (guessed_id)
{
gcc_rich_location richloc (loc);
richloc.add_fixit_replace (guessed_id.suggestion ());
error_at (&richloc,
"%qE undeclared here (not in a function);"
" did you mean %qs?",
id, guessed_id.suggestion ());
}
else
error_at (loc, "%qE undeclared here (not in a function)", id);
scope = current_scope;
}
else
{
if (!objc_diagnose_private_ivar (id))
{
name_hint guessed_id = lookup_name_fuzzy (id, FUZZY_LOOKUP_NAME, loc);
if (guessed_id)
{
gcc_rich_location richloc (loc);
richloc.add_fixit_replace (guessed_id.suggestion ());
error_at (&richloc,
"%qE undeclared (first use in this function);"
" did you mean %qs?",
id, guessed_id.suggestion ());
}
else
error_at (loc, "%qE undeclared (first use in this function)", id);
}
if (!already)
{
inform (loc, "each undeclared identifier is reported only"
" once for each function it appears in");
already = true;
}
/* If we are parsing old-style parameter decls, current_function_decl
will be nonnull but current_function_scope will be null. */
scope = current_function_scope ? current_function_scope : current_scope;
}
bind (id, error_mark_node, scope, /*invisible=*/false, /*nested=*/false,
UNKNOWN_LOCATION);
}
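/* Illustrative example, not part of the original source: in a
   function containing

       int total = 0;
       totl += 1;

   the reference to the misspelled "totl" lands here; the fuzzy
   lookup finds "total", so the error reads "'totl' undeclared
   (first use in this function); did you mean 'total'?". The inform
   note about undeclared identifiers is emitted only once per
   compilation, guarded by the static "already" flag. */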
/* Subroutine of lookup_label, declare_label, define_label: construct a
LABEL_DECL with all the proper frills. Also create a struct
c_label_vars initialized for the current scope. */
static tree
make_label (location_t location, tree name, bool defining,
struct c_label_vars **p_label_vars)
{
tree label = build_decl (location, LABEL_DECL, name, void_type_node);
DECL_CONTEXT (label) = current_function_decl;
SET_DECL_MODE (label, VOIDmode);
c_label_vars *label_vars = ggc_alloc<c_label_vars> ();
label_vars->shadowed = NULL;
set_spot_bindings (&label_vars->label_bindings, defining);
label_vars->decls_in_scope = make_tree_vector ();
label_vars->gotos = NULL;
*p_label_vars = label_vars;
return label;
}
/* Get the LABEL_DECL corresponding to identifier NAME as a label.
Create one if none exists so far for the current function.
This is called when a label is used in a goto expression or
has its address taken. */
tree
lookup_label (tree name)
{
tree label;
struct c_label_vars *label_vars;
if (current_function_scope == 0)
{
error ("label %qE referenced outside of any function", name);
return NULL_TREE;
}
/* Use a label already defined or ref'd with this name, but not if
it is inherited from a containing function and wasn't declared
using __label__. */
label = I_LABEL_DECL (name);
if (label && (DECL_CONTEXT (label) == current_function_decl
|| C_DECLARED_LABEL_FLAG (label)))
{
/* If the label has only been declared, update its apparent
location to point here, for better diagnostics if it
turns out not to have been defined. */
if (DECL_INITIAL (label) == NULL_TREE)
DECL_SOURCE_LOCATION (label) = input_location;
return label;
}
/* No label binding for that identifier; make one. */
label = make_label (input_location, name, false, &label_vars);
/* Ordinary labels go in the current function scope. */
bind_label (name, label, current_function_scope, label_vars);
return label;
}
/* Issue a warning about DECL for a goto statement at GOTO_LOC going
to LABEL. */
static void
warn_about_goto (location_t goto_loc, tree label, tree decl)
{
if (variably_modified_type_p (TREE_TYPE (decl), NULL_TREE))
error_at (goto_loc,
"jump into scope of identifier with variably modified type");
else
warning_at (goto_loc, OPT_Wjump_misses_init,
"jump skips variable initialization");
inform (DECL_SOURCE_LOCATION (label), "label %qD defined here", label);
inform (DECL_SOURCE_LOCATION (decl), "%qD declared here", decl);
}
/* Look up a label because of a goto statement. This is like
lookup_label, but also issues any appropriate warnings. */
tree
lookup_label_for_goto (location_t loc, tree name)
{
tree label;
struct c_label_vars *label_vars;
unsigned int ix;
tree decl;
label = lookup_label (name);
if (label == NULL_TREE)
return NULL_TREE;
/* If we are jumping to a different function, we can't issue any
useful warnings. */
if (DECL_CONTEXT (label) != current_function_decl)
{
gcc_assert (C_DECLARED_LABEL_FLAG (label));
return label;
}
label_vars = I_LABEL_BINDING (name)->u.label;
/* If the label has not yet been defined, then push this goto on a
list for possible later warnings. */
if (label_vars->label_bindings.scope == NULL)
{
c_goto_bindings *g = ggc_alloc<c_goto_bindings> ();
g->loc = loc;
set_spot_bindings (&g->goto_bindings, true);
vec_safe_push (label_vars->gotos, g);
return label;
}
/* If there are any decls in label_vars->decls_in_scope, then this
goto has missed the declaration of the decl. This happens for a
case like
int i = 1;
lab:
...
goto lab;
Issue a warning or error. */
FOR_EACH_VEC_SAFE_ELT (label_vars->decls_in_scope, ix, decl)
warn_about_goto (loc, label, decl);
if (label_vars->label_bindings.left_stmt_expr)
{
error_at (loc, "jump into statement expression");
inform (DECL_SOURCE_LOCATION (label), "label %qD defined here", label);
}
return label;
}
/* Make a label named NAME in the current function, shadowing silently
any that may be inherited from containing functions or containing
scopes. This is called for __label__ declarations. */
tree
declare_label (tree name)
{
struct c_binding *b = I_LABEL_BINDING (name);
tree label;
struct c_label_vars *label_vars;
/* Check to make sure that the label hasn't already been declared
at this scope. */
if (b && B_IN_CURRENT_SCOPE (b))
{
error ("duplicate label declaration %qE", name);
locate_old_decl (b->decl);
/* Just use the previous declaration. */
return b->decl;
}
label = make_label (input_location, name, false, &label_vars);
C_DECLARED_LABEL_FLAG (label) = 1;
/* Declared labels go in the current scope. */
bind_label (name, label, current_scope, label_vars);
return label;
}
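/* Illustrative example, not part of the original source:

       {
         __label__ retry;
        retry:
         if (!try_harder ())
           goto retry;
       }

   ("try_harder" is a hypothetical function.) The __label__
   declaration makes "retry" local to the block, silently shadowing
   any like-named label inherited from a containing scope or
   function; a second "__label__ retry;" in the same scope would
   instead produce the duplicate-declaration error above. */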
/* When we define a label, issue any appropriate warnings if there are
any gotos earlier in the function which jump to this label. */
static void
check_earlier_gotos (tree label, struct c_label_vars* label_vars)
{
unsigned int ix;
struct c_goto_bindings *g;
FOR_EACH_VEC_SAFE_ELT (label_vars->gotos, ix, g)
{
struct c_binding *b;
struct c_scope *scope;
/* We have a goto to this label. The goto is going forward. In
g->goto_bindings.scope, the goto is going to skip any binding
which was defined after g->goto_bindings.bindings_in_scope. */
if (g->goto_bindings.scope->has_jump_unsafe_decl)
{
for (b = g->goto_bindings.scope->bindings;
b != g->goto_bindings.bindings_in_scope;
b = b->prev)
{
if (decl_jump_unsafe (b->decl))
warn_about_goto (g->loc, label, b->decl);
}
}
/* We also need to warn about decls defined in any scopes
between the scope of the label and the scope of the goto. */
for (scope = label_vars->label_bindings.scope;
scope != g->goto_bindings.scope;
scope = scope->outer)
{
gcc_assert (scope != NULL);
if (scope->has_jump_unsafe_decl)
{
if (scope == label_vars->label_bindings.scope)
b = label_vars->label_bindings.bindings_in_scope;
else
b = scope->bindings;
for (; b != NULL; b = b->prev)
{
if (decl_jump_unsafe (b->decl))
warn_about_goto (g->loc, label, b->decl);
}
}
}
if (g->goto_bindings.stmt_exprs > 0)
{
error_at (g->loc, "jump into statement expression");
inform (DECL_SOURCE_LOCATION (label), "label %qD defined here",
label);
}
}
/* Now that the label is defined, we will issue warnings about
subsequent gotos to this label when we see them. */
vec_safe_truncate (label_vars->gotos, 0);
label_vars->gotos = NULL;
}
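/* Illustrative example, not part of the original source, assuming
   -Wjump-misses-init is enabled:

       goto done;
       int n = compute ();
      done:
       return;

   ("compute" is a hypothetical function.) The goto is recorded by
   lookup_label_for_goto because "done" is not yet defined; once the
   label is defined, check_earlier_gotos warns that the jump skips
   the initialization of "n". Had the skipped declaration been a VLA
   such as "char buf[n];", warn_about_goto would emit a hard error
   instead. */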
/* Define a label, specifying the location in the source file.
Return the LABEL_DECL node for the label, if the definition is valid.
Otherwise return NULL_TREE. */
tree
define_label (location_t location, tree name)
{
/* Find any preexisting label with this name. It is an error
if that label has already been defined in this function, or
if there is a containing function with a declared label with
the same name. */
tree label = I_LABEL_DECL (name);
if (label
&& ((DECL_CONTEXT (label) == current_function_decl
&& DECL_INITIAL (label) != NULL_TREE)
|| (DECL_CONTEXT (label) != current_function_decl
&& C_DECLARED_LABEL_FLAG (label))))
{
error_at (location, "duplicate label %qD", label);
locate_old_decl (label);
return NULL_TREE;
}
else if (label && DECL_CONTEXT (label) == current_function_decl)
{
struct c_label_vars *label_vars = I_LABEL_BINDING (name)->u.label;
/* The label has been used or declared already in this function,
but not defined. Update its location to point to this
definition. */
DECL_SOURCE_LOCATION (label) = location;
set_spot_bindings (&label_vars->label_bindings, true);
/* Issue warnings as required about any goto statements from
earlier in the function. */
check_earlier_gotos (label, label_vars);
}
else
{
struct c_label_vars *label_vars;
/* No label binding for that identifier; make one. */
label = make_label (location, name, true, &label_vars);
/* Ordinary labels go in the current function scope. */
bind_label (name, label, current_function_scope, label_vars);
}
if (!in_system_header_at (input_location) && lookup_name (name))
warning_at (location, OPT_Wtraditional,
"traditional C lacks a separate namespace "
"for labels, identifier %qE conflicts", name);
/* Mark label as having been defined. */
DECL_INITIAL (label) = error_mark_node;
return label;
}
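/* Illustrative example, not part of the original source: a function
   body containing

       again: ;
       again: ;

   calls define_label twice for "again"; the second call reports
   "duplicate label 'again'" and locate_old_decl points back at the
   first definition. */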
/* Get the bindings for a new switch statement. This is used to issue
warnings as appropriate for jumps from the switch to case or
default labels. */
struct c_spot_bindings *
c_get_switch_bindings (void)
{
struct c_spot_bindings *switch_bindings;
switch_bindings = XNEW (struct c_spot_bindings);
set_spot_bindings (switch_bindings, true);
return switch_bindings;
}
void
c_release_switch_bindings (struct c_spot_bindings *bindings)
{
gcc_assert (bindings->stmt_exprs == 0 && !bindings->left_stmt_expr);
XDELETE (bindings);
}
/* This is called at the point of a case or default label to issue
warnings about decls as needed. It returns true if it found an
error, not just a warning. */
bool
c_check_switch_jump_warnings (struct c_spot_bindings *switch_bindings,
location_t switch_loc, location_t case_loc)
{
bool saw_error;
struct c_scope *scope;
saw_error = false;
for (scope = current_scope;
scope != switch_bindings->scope;
scope = scope->outer)
{
struct c_binding *b;
gcc_assert (scope != NULL);
if (!scope->has_jump_unsafe_decl)
continue;
for (b = scope->bindings; b != NULL; b = b->prev)
{
if (decl_jump_unsafe (b->decl))
{
if (variably_modified_type_p (TREE_TYPE (b->decl), NULL_TREE))
{
saw_error = true;
error_at (case_loc,
("switch jumps into scope of identifier with "
"variably modified type"));
}
else
warning_at (case_loc, OPT_Wjump_misses_init,
"switch jumps over variable initialization");
inform (switch_loc, "switch starts here");
inform (DECL_SOURCE_LOCATION (b->decl), "%qD declared here",
b->decl);
}
}
}
if (switch_bindings->stmt_exprs > 0)
{
saw_error = true;
error_at (case_loc, "switch jumps into statement expression");
inform (switch_loc, "switch starts here");
}
return saw_error;
}
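/* Illustrative example, not part of the original source, assuming
   -Wjump-misses-init is enabled:

       switch (x)
         {
           int i = 1;
         case 0:
           return i;
         }

   jumping from the switch to "case 0" bypasses the initialization
   of "i", so the warning above fires; had "i" been of variably
   modified type, an error would be emitted and true returned to the
   caller. */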
/* Given NAME, an IDENTIFIER_NODE,
return the structure (or union or enum) definition for that name.
If THISLEVEL_ONLY is nonzero, searches only the current_scope.
CODE says which kind of type the caller wants;
it is RECORD_TYPE or UNION_TYPE or ENUMERAL_TYPE.
If PLOC is not NULL and this returns non-null, it sets *PLOC to the
location where the tag was defined.
If the wrong kind of type is found, an error is reported. */
static tree
lookup_tag (enum tree_code code, tree name, bool thislevel_only,
location_t *ploc)
{
struct c_binding *b = I_TAG_BINDING (name);
bool thislevel = false;
if (!b || !b->decl)
return NULL_TREE;
/* We only care about whether it's in this level if
thislevel_only was set or it might be a type clash. */
if (thislevel_only || TREE_CODE (b->decl) != code)
{
/* For our purposes, a tag in the external scope is the same as
a tag in the file scope. (Primarily relevant to Objective-C
and its builtin structure tags, which get pushed before the
file scope is created.) */
if (B_IN_CURRENT_SCOPE (b)
|| (current_scope == file_scope && B_IN_EXTERNAL_SCOPE (b)))
thislevel = true;
}
if (thislevel_only && !thislevel)
return NULL_TREE;
if (TREE_CODE (b->decl) != code)
{
/* Definition isn't the kind we were looking for. */
pending_invalid_xref = name;
pending_invalid_xref_location = input_location;
/* If the conflicting declaration is in the current binding
level, this tag must not be allowed to shadow it, so give the
error immediately. (For example, "struct foo; union foo;" is
invalid.) */
if (thislevel)
pending_xref_error ();
}
if (ploc != NULL)
*ploc = b->locus;
return b->decl;
}
/* Return true if a definition exists for NAME with code CODE. */
bool
tag_exists_p (enum tree_code code, tree name)
{
struct c_binding *b = I_TAG_BINDING (name);
if (b == NULL || b->decl == NULL_TREE)
return false;
return TREE_CODE (b->decl) == code;
}
/* Print an error message now
for a recent invalid struct, union or enum cross reference.
We don't print them immediately because they are not invalid
when used in the `struct foo;' construct for shadowing. */
void
pending_xref_error (void)
{
if (pending_invalid_xref != NULL_TREE)
error_at (pending_invalid_xref_location, "%qE defined as wrong kind of tag",
pending_invalid_xref);
pending_invalid_xref = NULL_TREE;
}
/* Look up NAME in the current scope and its superiors
in the namespace of variables, functions and typedefs.
Return a ..._DECL node of some kind representing its definition,
or return NULL_TREE if it is undefined. */
tree
lookup_name (tree name)
{
struct c_binding *b = I_SYMBOL_BINDING (name);
if (b && !b->invisible)
{
maybe_record_typedef_use (b->decl);
return b->decl;
}
return NULL_TREE;
}
/* Similar to `lookup_name' but look only at the indicated scope. */
static tree
lookup_name_in_scope (tree name, struct c_scope *scope)
{
struct c_binding *b;
for (b = I_SYMBOL_BINDING (name); b; b = b->shadowed)
if (B_IN_SCOPE (b, scope))
return b->decl;
return NULL_TREE;
}
/* Look for the closest match for NAME within the currently valid
scopes.
This finds the identifier with the lowest Levenshtein distance to
NAME. If there are multiple candidates with equal minimal distance,
the first one found is returned. Scopes are searched from innermost
outwards, and within a scope in reverse order of declaration, thus
benefiting candidates "near" to the current scope.
The function also looks for similar macro names to NAME, since a
misspelled macro name will not be expanded, and hence looks like an
identifier to the C frontend.
It also looks for start_typename keywords, to detect "singed" vs "signed"
typos.
Use LOC for any deferred diagnostics. */
name_hint
lookup_name_fuzzy (tree name, enum lookup_name_fuzzy_kind kind, location_t loc)
{
gcc_assert (TREE_CODE (name) == IDENTIFIER_NODE);
/* First, try some well-known names in the C standard library, in case
the user forgot a #include. */
const char *header_hint
= get_c_stdlib_header_for_name (IDENTIFIER_POINTER (name));
if (header_hint)
return name_hint (NULL,
new suggest_missing_header (loc,
IDENTIFIER_POINTER (name),
header_hint));
/* Only suggest names reserved for the implementation if NAME begins
with an underscore. */
bool consider_implementation_names = (IDENTIFIER_POINTER (name)[0] == '_');
best_match<tree, tree> bm (name);
/* Look within currently valid scopes. */
for (c_scope *scope = current_scope; scope; scope = scope->outer)
for (c_binding *binding = scope->bindings; binding; binding = binding->prev)
{
if (!binding->id || binding->invisible)
continue;
if (binding->decl == error_mark_node)
continue;
/* Don't use bindings from implicitly declared functions,
as they were likely misspellings themselves. */
if (TREE_CODE (binding->decl) == FUNCTION_DECL)
if (C_DECL_IMPLICIT (binding->decl))
continue;
/* Don't suggest names that are reserved for use by the
implementation, unless NAME began with an underscore. */
if (!consider_implementation_names)
{
const char *suggestion_str = IDENTIFIER_POINTER (binding->id);
if (name_reserved_for_implementation_p (suggestion_str))
continue;
}
switch (kind)
{
case FUZZY_LOOKUP_TYPENAME:
if (TREE_CODE (binding->decl) != TYPE_DECL)
continue;
break;
case FUZZY_LOOKUP_FUNCTION_NAME:
if (TREE_CODE (binding->decl) != FUNCTION_DECL)
{
/* Allow function pointers. */
if ((VAR_P (binding->decl)
|| TREE_CODE (binding->decl) == PARM_DECL)
&& TREE_CODE (TREE_TYPE (binding->decl)) == POINTER_TYPE
&& (TREE_CODE (TREE_TYPE (TREE_TYPE (binding->decl)))
== FUNCTION_TYPE))
break;
continue;
}
break;
default:
break;
}
bm.consider (binding->id);
}
/* Consider macros: if the user misspelled a macro name e.g. "SOME_MACRO"
as:
x = SOME_OTHER_MACRO (y);
then "SOME_OTHER_MACRO" will survive to the frontend and show up
as a misspelled identifier.
Use the best distance so far so that a candidate is only set if
a macro is better than anything so far. This allows early rejection
(without calculating the edit distance) of macro names that must have
distance >= bm.get_best_distance (), and means that we only get a
non-NULL result for best_macro_match if it's better than any of
the identifiers already checked, which avoids needless creation
of identifiers for macro hashnodes. */
best_macro_match bmm (name, bm.get_best_distance (), parse_in);
cpp_hashnode *best_macro = bmm.get_best_meaningful_candidate ();
/* If a macro is the closest so far to NAME, use it, creating an
identifier tree node for it. */
if (best_macro)
{
const char *id = (const char *)best_macro->ident.str;
tree macro_as_identifier
= get_identifier_with_length (id, best_macro->ident.len);
bm.set_best_so_far (macro_as_identifier,
bmm.get_best_distance (),
bmm.get_best_candidate_length ());
}
/* Try the "start_typename" keywords to detect
"singed" vs "signed" typos. */
if (kind == FUZZY_LOOKUP_TYPENAME)
{
for (unsigned i = 0; i < num_c_common_reswords; i++)
{
const c_common_resword *resword = &c_common_reswords[i];
if (!c_keyword_starts_typename (resword->rid))
continue;
tree resword_identifier = ridpointers [resword->rid];
if (!resword_identifier)
continue;
gcc_assert (TREE_CODE (resword_identifier) == IDENTIFIER_NODE);
bm.consider (resword_identifier);
}
}
tree best = bm.get_best_meaningful_candidate ();
if (best)
return name_hint (IDENTIFIER_POINTER (best), NULL);
else
return name_hint (NULL, NULL);
}
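/* Illustrative examples, not part of the original source: after
   "int color;", a later use of "colour" yields a hint suggesting
   "color". A misspelled macro, say "BUFSIZZ" where <stdio.h>
   defines BUFSIZ, can likewise be matched through the preprocessor
   hash table; and for FUZZY_LOOKUP_TYPENAME the keyword scan above
   lets "singed char x;" suggest "signed". */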
/* Create the predefined scalar types of C,
and some nodes representing standard constants (0, 1, (void *) 0).
Initialize the global scope.
Make definitions for built-in primitive functions. */
void
c_init_decl_processing (void)
{
location_t save_loc = input_location;
/* Initialize reserved words for parser. */
c_parse_init ();
current_function_decl = NULL_TREE;
gcc_obstack_init (&parser_obstack);
/* Make the externals scope. */
push_scope ();
external_scope = current_scope;
/* Declarations from c_common_nodes_and_builtins must not be associated
with this input file, lest we get differences between using and not
using preprocessed headers. */
input_location = BUILTINS_LOCATION;
c_common_nodes_and_builtins ();
/* In C, comparisons and TRUTH_* expressions have type int. */
truthvalue_type_node = integer_type_node;
truthvalue_true_node = integer_one_node;
truthvalue_false_node = integer_zero_node;
/* Even in C99, which has a real boolean type. */
pushdecl (build_decl (UNKNOWN_LOCATION, TYPE_DECL, get_identifier ("_Bool"),
boolean_type_node));
input_location = save_loc;
make_fname_decl = c_make_fname_decl;
start_fname_decls ();
}
/* Create the VAR_DECL at LOC for __FUNCTION__ etc. ID is the name to
give the decl, NAME is the initialization string and TYPE_DEP
indicates whether NAME depended on the type of the function. As we
don't yet implement delayed emission of static data, we mark the
decl as emitted so it is not placed in the output. Anything using
it must therefore pull out the STRING_CST initializer directly.
FIXME. */
static tree
c_make_fname_decl (location_t loc, tree id, int type_dep)
{
const char *name = fname_as_string (type_dep);
tree decl, type, init;
size_t length = strlen (name);
type = build_array_type (char_type_node,
build_index_type (size_int (length)));
type = c_build_qualified_type (type, TYPE_QUAL_CONST);
decl = build_decl (loc, VAR_DECL, id, type);
TREE_STATIC (decl) = 1;
TREE_READONLY (decl) = 1;
DECL_ARTIFICIAL (decl) = 1;
init = build_string (length + 1, name);
free (CONST_CAST (char *, name));
TREE_TYPE (init) = type;
DECL_INITIAL (decl) = init;
TREE_USED (decl) = 1;
if (current_function_decl
/* For invalid programs like this:
void foo()
const char* p = __FUNCTION__;
the __FUNCTION__ is believed to appear in K&R style function
parameter declarator. In that case we still don't have
function_scope. */
&& current_function_scope)
{
DECL_CONTEXT (decl) = current_function_decl;
bind (id, decl, current_function_scope,
/*invisible=*/false, /*nested=*/false, UNKNOWN_LOCATION);
}
finish_decl (decl, loc, init, NULL_TREE, NULL_TREE);
return decl;
}
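/* Illustrative example, not part of the original source: in

       void report (void) { puts (__func__); }

   the use of __func__ triggers c_make_fname_decl, which builds the
   equivalent of

       static const char __func__[7] = "report";

   bound in the function scope by the bind call above. */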
tree
c_builtin_function (tree decl)
{
tree type = TREE_TYPE (decl);
tree id = DECL_NAME (decl);
const char *name = IDENTIFIER_POINTER (id);
C_DECL_BUILTIN_PROTOTYPE (decl) = prototype_p (type);
/* Should never be called on a symbol with a preexisting meaning. */
gcc_assert (!I_SYMBOL_BINDING (id));
bind (id, decl, external_scope, /*invisible=*/true, /*nested=*/false,
UNKNOWN_LOCATION);
/* Builtins in the implementation namespace are made visible without
needing to be explicitly declared. See push_file_scope. */
if (name[0] == '_' && (name[1] == '_' || ISUPPER (name[1])))
{
DECL_CHAIN (decl) = visible_builtins;
visible_builtins = decl;
}
return decl;
}
tree
c_builtin_function_ext_scope (tree decl)
{
tree type = TREE_TYPE (decl);
tree id = DECL_NAME (decl);
const char *name = IDENTIFIER_POINTER (id);
C_DECL_BUILTIN_PROTOTYPE (decl) = prototype_p (type);
if (external_scope)
bind (id, decl, external_scope, /*invisible=*/false, /*nested=*/false,
UNKNOWN_LOCATION);
/* Builtins in the implementation namespace are made visible without
needing to be explicitly declared. See push_file_scope. */
if (name[0] == '_' && (name[1] == '_' || ISUPPER (name[1])))
{
DECL_CHAIN (decl) = visible_builtins;
visible_builtins = decl;
}
return decl;
}
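/* Illustrative note, with a hypothetical example: the namespace
   check above is why "__builtin_memcpy (d, s, n)" compiles with no
   declaration in scope, while plain "memcpy" normally still needs a
   declaration from <string.h>; only names starting with "__" or an
   underscore followed by an uppercase letter are chained onto
   visible_builtins. */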
/* Called when a declaration is seen that contains no names to declare.
If its type is a reference to a structure, union or enum inherited
from a containing scope, shadow that tag name for the current scope
with a forward reference.
If its type defines a new named structure or union
or defines an enum, it is valid but we need not do anything here.
Otherwise, it is an error. */
void
shadow_tag (const struct c_declspecs *declspecs)
{
shadow_tag_warned (declspecs, 0);
}
/* WARNED is 1 if we have done a pedwarn, 2 if we have done a warning,
but no pedwarn. */
void
shadow_tag_warned (const struct c_declspecs *declspecs, int warned)
{
bool found_tag = false;
if (declspecs->type && !declspecs->default_int_p && !declspecs->typedef_p)
{
tree value = declspecs->type;
enum tree_code code = TREE_CODE (value);
if (code == RECORD_TYPE || code == UNION_TYPE || code == ENUMERAL_TYPE)
/* Used to test also that TYPE_SIZE (value) != 0.
That caused warning for `struct foo;' at top level in the file. */
{
tree name = TYPE_NAME (value);
tree t;
found_tag = true;
if (declspecs->restrict_p)
{
error ("invalid use of %<restrict%>");
warned = 1;
}
if (name == NULL_TREE)
{
if (warned != 1 && code != ENUMERAL_TYPE)
/* Empty unnamed enum OK */
{
pedwarn (input_location, 0,
"unnamed struct/union that defines no instances");
warned = 1;
}
}
else if (declspecs->typespec_kind != ctsk_tagdef
&& declspecs->typespec_kind != ctsk_tagfirstref
&& declspecs->storage_class != csc_none)
{
if (warned != 1)
pedwarn (input_location, 0,
"empty declaration with storage class specifier "
"does not redeclare tag");
warned = 1;
pending_xref_error ();
}
else if (declspecs->typespec_kind != ctsk_tagdef
&& declspecs->typespec_kind != ctsk_tagfirstref
&& (declspecs->const_p
|| declspecs->volatile_p
|| declspecs->atomic_p
|| declspecs->restrict_p
|| declspecs->address_space))
{
if (warned != 1)
pedwarn (input_location, 0,
"empty declaration with type qualifier "
"does not redeclare tag");
warned = 1;
pending_xref_error ();
}
else if (declspecs->typespec_kind != ctsk_tagdef
&& declspecs->typespec_kind != ctsk_tagfirstref
&& declspecs->alignas_p)
{
if (warned != 1)
pedwarn (input_location, 0,
"empty declaration with %<_Alignas%> "
"does not redeclare tag");
warned = 1;
pending_xref_error ();
}
else
{
pending_invalid_xref = NULL_TREE;
t = lookup_tag (code, name, true, NULL);
if (t == NULL_TREE)
{
t = make_node (code);
pushtag (input_location, name, t);
}
}
}
else
{
if (warned != 1 && !in_system_header_at (input_location))
{
pedwarn (input_location, 0,
"useless type name in empty declaration");
warned = 1;
}
}
}
else if (warned != 1 && !in_system_header_at (input_location)
&& declspecs->typedef_p)
{
pedwarn (input_location, 0, "useless type name in empty declaration");
warned = 1;
}
pending_invalid_xref = NULL_TREE;
if (declspecs->inline_p)
{
error ("%<inline%> in empty declaration");
warned = 1;
}
if (declspecs->noreturn_p)
{
error ("%<_Noreturn%> in empty declaration");
warned = 1;
}
if (current_scope == file_scope && declspecs->storage_class == csc_auto)
{
error ("%<auto%> in file-scope empty declaration");
warned = 1;
}
if (current_scope == file_scope && declspecs->storage_class == csc_register)
{
error ("%<register%> in file-scope empty declaration");
warned = 1;
}
if (!warned && !in_system_header_at (input_location)
&& declspecs->storage_class != csc_none)
{
warning (0, "useless storage class specifier in empty declaration");
warned = 2;
}
if (!warned && !in_system_header_at (input_location) && declspecs->thread_p)
{
warning (0, "useless %qs in empty declaration",
declspecs->thread_gnu_p ? "__thread" : "_Thread_local");
warned = 2;
}
if (!warned
&& !in_system_header_at (input_location)
&& (declspecs->const_p
|| declspecs->volatile_p
|| declspecs->atomic_p
|| declspecs->restrict_p
|| declspecs->address_space))
{
warning (0, "useless type qualifier in empty declaration");
warned = 2;
}
if (!warned && !in_system_header_at (input_location)
&& declspecs->alignas_p)
{
warning (0, "useless %<_Alignas%> in empty declaration");
warned = 2;
}
if (warned != 1)
{
if (!found_tag)
pedwarn (input_location, 0, "empty declaration");
}
}
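/* Illustrative examples, not part of the original source, assuming
   "struct pair" was declared earlier; each line is a file-scope
   empty declaration:

       struct pair;          valid, no diagnostic
       static struct pair;   pedwarn: empty declaration with storage
                             class specifier does not redeclare tag
       const struct pair;    pedwarn: empty declaration with type
                             qualifier does not redeclare tag
       int;                  pedwarn: useless type name in empty
                             declaration  */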
/* Return the qualifiers from SPECS as a bitwise OR of TYPE_QUAL_*
bits. SPECS represents declaration specifiers that the grammar
only permits to contain type qualifiers and attributes. */
int
quals_from_declspecs (const struct c_declspecs *specs)
{
int quals = ((specs->const_p ? TYPE_QUAL_CONST : 0)
| (specs->volatile_p ? TYPE_QUAL_VOLATILE : 0)
| (specs->restrict_p ? TYPE_QUAL_RESTRICT : 0)
| (specs->atomic_p ? TYPE_QUAL_ATOMIC : 0)
| (ENCODE_QUAL_ADDR_SPACE (specs->address_space)));
gcc_assert (!specs->type
&& !specs->decl_attr
&& specs->typespec_word == cts_none
&& specs->storage_class == csc_none
&& !specs->typedef_p
&& !specs->explicit_signed_p
&& !specs->deprecated_p
&& !specs->long_p
&& !specs->long_long_p
&& !specs->short_p
&& !specs->signed_p
&& !specs->unsigned_p
&& !specs->complex_p
&& !specs->inline_p
&& !specs->noreturn_p
&& !specs->thread_p);
return quals;
}
/* Construct an array declarator. LOC is the location of the
beginning of the array (usually the opening bracket). EXPR is the
expression inside [], or NULL_TREE. QUALS are the type qualifiers
inside the [] (to be applied to the pointer to which a parameter
array is converted). STATIC_P is true if "static" is inside the
[], false otherwise. VLA_UNSPEC_P is true if the array is [*], a
VLA of unspecified length which is nevertheless a complete type,
false otherwise. The field for the contained declarator is left to
be filled in by set_array_declarator_inner. */
struct c_declarator *
build_array_declarator (location_t loc,
tree expr, struct c_declspecs *quals, bool static_p,
bool vla_unspec_p)
{
struct c_declarator *declarator = XOBNEW (&parser_obstack,
struct c_declarator);
declarator->id_loc = loc;
declarator->kind = cdk_array;
declarator->declarator = 0;
declarator->u.array.dimen = expr;
if (quals)
{
declarator->u.array.attrs = quals->attrs;
declarator->u.array.quals = quals_from_declspecs (quals);
}
else
{
declarator->u.array.attrs = NULL_TREE;
declarator->u.array.quals = 0;
}
declarator->u.array.static_p = static_p;
declarator->u.array.vla_unspec_p = vla_unspec_p;
if (static_p || quals != NULL)
pedwarn_c90 (loc, OPT_Wpedantic,
"ISO C90 does not support %<static%> or type "
"qualifiers in parameter array declarators");
if (vla_unspec_p)
pedwarn_c90 (loc, OPT_Wpedantic,
"ISO C90 does not support %<[*]%> array declarators");
if (vla_unspec_p)
{
if (!current_scope->parm_flag)
{
/* C99 6.7.5.2p4 */
error_at (loc, "%<[*]%> not allowed in other than "
"function prototype scope");
declarator->u.array.vla_unspec_p = false;
return NULL;
}
current_scope->had_vla_unspec = true;
}
return declarator;
}
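/* Illustrative example, not part of the original source: parsing
   the prototype

       void f (int n, int a[static n]);

   builds an array declarator whose dimension expression is "n" and
   whose static_p flag is set. Under -std=c90 -Wpedantic the
   pedwarn_c90 calls above diagnose "static" and qualifiers inside
   the [], and "[*]" declarators such as "int b[*]". */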
/* Set the contained declarator of an array declarator. DECL is the
declarator, as constructed by build_array_declarator; INNER is what
appears on the left of the []. */
struct c_declarator *
set_array_declarator_inner (struct c_declarator *decl,
struct c_declarator *inner)
{
decl->declarator = inner;
return decl;
}
/* INIT is a constructor that forms DECL's initializer. If the final
element initializes a flexible array field, add the size of that
initializer to DECL's size. */
static void
add_flexible_array_elts_to_size (tree decl, tree init)
{
tree elt, type;
if (vec_safe_is_empty (CONSTRUCTOR_ELTS (init)))
return;
elt = CONSTRUCTOR_ELTS (init)->last ().value;
type = TREE_TYPE (elt);
if (TREE_CODE (type) == ARRAY_TYPE
&& TYPE_SIZE (type) == NULL_TREE
&& TYPE_DOMAIN (type) != NULL_TREE
&& TYPE_MAX_VALUE (TYPE_DOMAIN (type)) == NULL_TREE)
{
complete_array_type (&type, elt, false);
DECL_SIZE (decl)
= size_binop (PLUS_EXPR, DECL_SIZE (decl), TYPE_SIZE (type));
DECL_SIZE_UNIT (decl)
= size_binop (PLUS_EXPR, DECL_SIZE_UNIT (decl), TYPE_SIZE_UNIT (type));
}
}
/* Decode a "typename", such as "int **", returning a ..._TYPE node.
Set *EXPR, if EXPR not NULL, to any expression to be evaluated
before the type name, and set *EXPR_CONST_OPERANDS, if
EXPR_CONST_OPERANDS not NULL, to indicate whether the type name may
appear in a constant expression. */
tree
groktypename (struct c_type_name *type_name, tree *expr,
bool *expr_const_operands)
{
tree type;
tree attrs = type_name->specs->attrs;
type_name->specs->attrs = NULL_TREE;
type = grokdeclarator (type_name->declarator, type_name->specs, TYPENAME,
false, NULL, &attrs, expr, expr_const_operands,
DEPRECATED_NORMAL);
/* Apply attributes. */
decl_attributes (&type, attrs, 0);
return type;
}
/* Wrapper for decl_attributes that adds some implicit attributes
to VAR_DECLs or FUNCTION_DECLs. */
static tree
c_decl_attributes (tree *node, tree attributes, int flags)
{
/* Add implicit "omp declare target" attribute if requested. */
if (current_omp_declare_target_attribute
&& ((VAR_P (*node) && is_global_var (*node))
|| TREE_CODE (*node) == FUNCTION_DECL))
{
if (VAR_P (*node)
&& !lang_hooks.types.omp_mappable_type (TREE_TYPE (*node)))
attributes = tree_cons (get_identifier ("omp declare target implicit"),
NULL_TREE, attributes);
else
attributes = tree_cons (get_identifier ("omp declare target"),
NULL_TREE, attributes);
}
/* Look up the current declaration with all the attributes merged
so far so that attributes on the current declaration that's
about to be pushed that conflict with the former can be detected,
diagnosed, and rejected as appropriate. */
tree last_decl = lookup_name (DECL_NAME (*node));
if (!last_decl)
last_decl = lookup_name_in_scope (DECL_NAME (*node), external_scope);
return decl_attributes (node, attributes, flags, last_decl);
}
/* Decode a declarator in an ordinary declaration or data definition.
This is called as soon as the type information and variable name
have been parsed, before parsing the initializer if any.
Here we create the ..._DECL node, fill in its type,
and put it on the list of decls for the current context.
The ..._DECL node is returned as the value.
Exception: for arrays where the length is not specified,
the type is left null, to be filled in by `finish_decl'.
Function definitions do not come here; they go to start_function
instead. However, external and forward declarations of functions
do go through here. Structure field declarations are done by
grokfield and not through here. */
tree
start_decl (struct c_declarator *declarator, struct c_declspecs *declspecs,
bool initialized, tree attributes)
{
tree decl;
tree tem;
tree expr = NULL_TREE;
enum deprecated_states deprecated_state = DEPRECATED_NORMAL;
/* An object declared as __attribute__((deprecated)) suppresses
warnings of uses of other deprecated items. */
if (lookup_attribute ("deprecated", attributes))
deprecated_state = DEPRECATED_SUPPRESS;
decl = grokdeclarator (declarator, declspecs,
NORMAL, initialized, NULL, &attributes, &expr, NULL,
deprecated_state);
if (!decl || decl == error_mark_node)
return NULL_TREE;
if (expr)
add_stmt (fold_convert (void_type_node, expr));
if (TREE_CODE (decl) != FUNCTION_DECL && MAIN_NAME_P (DECL_NAME (decl)))
warning (OPT_Wmain, "%q+D is usually a function", decl);
if (initialized)
/* Is it valid for this decl to have an initializer at all?
If not, set INITIALIZED to zero, which will indirectly
tell 'finish_decl' to ignore the initializer once it is parsed. */
switch (TREE_CODE (decl))
{
case TYPE_DECL:
error ("typedef %qD is initialized (use __typeof__ instead)", decl);
initialized = false;
break;
case FUNCTION_DECL:
error ("function %qD is initialized like a variable", decl);
initialized = false;
break;
case PARM_DECL:
/* DECL_INITIAL in a PARM_DECL is really DECL_ARG_TYPE. */
error ("parameter %qD is initialized", decl);
initialized = false;
break;
default:
/* Don't allow initializations for incomplete types except for
arrays which might be completed by the initialization. */
/* This can happen if the array size is an undefined macro.
We already gave a warning, so we don't need another one. */
if (TREE_TYPE (decl) == error_mark_node)
initialized = false;
else if (COMPLETE_TYPE_P (TREE_TYPE (decl)))
{
/* A complete type is ok if size is fixed. */
if (TREE_CODE (TYPE_SIZE (TREE_TYPE (decl))) != INTEGER_CST
|| C_DECL_VARIABLE_SIZE (decl))
{
error ("variable-sized object may not be initialized");
initialized = false;
}
}
else if (TREE_CODE (TREE_TYPE (decl)) != ARRAY_TYPE)
{
error ("variable %qD has initializer but incomplete type", decl);
initialized = false;
}
else if (C_DECL_VARIABLE_SIZE (decl))
{
/* Although C99 is unclear about whether incomplete arrays
of VLAs themselves count as VLAs, it does not make
sense to permit them to be initialized given that
ordinary VLAs may not be initialized. */
error ("variable-sized object may not be initialized");
initialized = false;
}
}
if (initialized)
{
if (current_scope == file_scope)
TREE_STATIC (decl) = 1;
/* Tell 'pushdecl' this is an initialized decl
even though we don't yet have the initializer expression.
Also tell 'finish_decl' it may store the real initializer. */
DECL_INITIAL (decl) = error_mark_node;
}
/* If this is a function declaration, write a record describing it to the
prototypes file (if requested). */
if (TREE_CODE (decl) == FUNCTION_DECL)
gen_aux_info_record (decl, 0, 0, prototype_p (TREE_TYPE (decl)));
/* ANSI specifies that a tentative definition which is not merged with
a non-tentative definition behaves exactly like a definition with an
initializer equal to zero. (Section 3.7.2)
-fno-common gives strict ANSI behavior, though this tends to break
a large body of code that grew up without this rule.
Thread-local variables are never common, since there's no entrenched
body of code to break, and it allows more efficient variable references
in the presence of dynamic linking. */
if (VAR_P (decl)
&& !initialized
&& TREE_PUBLIC (decl)
&& !DECL_THREAD_LOCAL_P (decl)
&& !flag_no_common)
DECL_COMMON (decl) = 1;
/* Set attributes here so if duplicate decl, will have proper attributes. */
c_decl_attributes (&decl, attributes, 0);
/* Handle gnu_inline attribute. */
if (declspecs->inline_p
&& !flag_gnu89_inline
&& TREE_CODE (decl) == FUNCTION_DECL
&& (lookup_attribute ("gnu_inline", DECL_ATTRIBUTES (decl))
|| current_function_decl))
{
if (declspecs->storage_class == csc_auto && current_scope != file_scope)
;
else if (declspecs->storage_class != csc_static)
DECL_EXTERNAL (decl) = !DECL_EXTERNAL (decl);
}
if (TREE_CODE (decl) == FUNCTION_DECL
&& targetm.calls.promote_prototypes (TREE_TYPE (decl)))
{
struct c_declarator *ce = declarator;
if (ce->kind == cdk_pointer)
ce = declarator->declarator;
if (ce->kind == cdk_function)
{
tree args = ce->u.arg_info->parms;
for (; args; args = DECL_CHAIN (args))
{
tree type = TREE_TYPE (args);
if (type && INTEGRAL_TYPE_P (type)
&& TYPE_PRECISION (type) < TYPE_PRECISION (integer_type_node))
DECL_ARG_TYPE (args) = c_type_promotes_to (type);
}
}
}
if (TREE_CODE (decl) == FUNCTION_DECL
&& DECL_DECLARED_INLINE_P (decl)
&& DECL_UNINLINABLE (decl)
&& lookup_attribute ("noinline", DECL_ATTRIBUTES (decl)))
warning (OPT_Wattributes, "inline function %q+D given attribute noinline",
decl);
/* C99 6.7.4p3: An inline definition of a function with external
linkage shall not contain a definition of a modifiable object
with static storage duration... */
if (VAR_P (decl)
&& current_scope != file_scope
&& TREE_STATIC (decl)
&& !TREE_READONLY (decl)
&& DECL_DECLARED_INLINE_P (current_function_decl)
&& DECL_EXTERNAL (current_function_decl))
record_inline_static (input_location, current_function_decl,
decl, csi_modifiable);
if (c_dialect_objc ()
&& VAR_OR_FUNCTION_DECL_P (decl))
objc_check_global_decl (decl);
/* Add this decl to the current scope.
TEM may equal DECL or it may be a previous decl of the same name. */
tem = pushdecl (decl);
if (initialized && DECL_EXTERNAL (tem))
{
DECL_EXTERNAL (tem) = 0;
TREE_STATIC (tem) = 1;
}
return tem;
}
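/* Illustrative example, not part of the original source: at file
   scope,

       int counter;

   is a tentative definition; start_decl marks it DECL_COMMON (in
   the absence of -fno-common), so repeating the line across
   translation units still links. By contrast, "int counter = 0;"
   arrives with INITIALIZED set and gets DECL_INITIAL =
   error_mark_node as a placeholder until finish_decl stores the
   real initializer. */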
/* Subroutine of finish_decl. TYPE is the type of an uninitialized object
DECL or the non-array element type if DECL is an uninitialized array.
If that type has a const member, diagnose this. */
static void
diagnose_uninitialized_cst_member (tree decl, tree type)
{
tree field;
for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
{
tree field_type;
if (TREE_CODE (field) != FIELD_DECL)
continue;
field_type = strip_array_types (TREE_TYPE (field));
if (TYPE_QUALS (field_type) & TYPE_QUAL_CONST)
{
warning_at (DECL_SOURCE_LOCATION (decl), OPT_Wc___compat,
"uninitialized const member in %qT is invalid in C++",
strip_array_types (TREE_TYPE (decl)));
inform (DECL_SOURCE_LOCATION (field), "%qD should be initialized", field);
}
if (RECORD_OR_UNION_TYPE_P (field_type))
diagnose_uninitialized_cst_member (decl, field_type);
}
}
/* Finish processing of a declaration;
install its initial value.
If ORIGTYPE is not NULL_TREE, it is the original type of INIT.
If the length of an array type is not known before,
it must be determined now, from the initial value, or it is an error.
INIT_LOC is the location of the initial value. */
void
finish_decl (tree decl, location_t init_loc, tree init,
tree origtype, tree asmspec_tree)
{
tree type;
bool was_incomplete = (DECL_SIZE (decl) == NULL_TREE);
const char *asmspec = 0;
/* If a name was specified, get the string. */
if (VAR_OR_FUNCTION_DECL_P (decl)
&& DECL_FILE_SCOPE_P (decl))
asmspec_tree = maybe_apply_renaming_pragma (decl, asmspec_tree);
if (asmspec_tree)
asmspec = TREE_STRING_POINTER (asmspec_tree);
if (VAR_P (decl)
&& TREE_STATIC (decl)
&& global_bindings_p ())
/* So decl is a global variable. Record the types it uses
so that we can decide later to emit debug info for them. */
record_types_used_by_current_var_decl (decl);
/* If `start_decl' didn't like having an initialization, ignore it now. */
if (init != NULL_TREE && DECL_INITIAL (decl) == NULL_TREE)
init = NULL_TREE;
/* Don't crash if parm is initialized. */
if (TREE_CODE (decl) == PARM_DECL)
init = NULL_TREE;
if (init)
store_init_value (init_loc, decl, init, origtype);
if (c_dialect_objc () && (VAR_OR_FUNCTION_DECL_P (decl)
|| TREE_CODE (decl) == FIELD_DECL))
objc_check_decl (decl);
type = TREE_TYPE (decl);
/* Deduce size of array from initialization, if not already known. */
if (TREE_CODE (type) == ARRAY_TYPE
&& TYPE_DOMAIN (type) == NULL_TREE
&& TREE_CODE (decl) != TYPE_DECL)
{
bool do_default
= (TREE_STATIC (decl)
/* Even if pedantic, an external linkage array
may have incomplete type at first. */
? pedantic && !TREE_PUBLIC (decl)
: !DECL_EXTERNAL (decl));
int failure
= complete_array_type (&TREE_TYPE (decl), DECL_INITIAL (decl),
do_default);
/* Get the completed type made by complete_array_type. */
type = TREE_TYPE (decl);
switch (failure)
{
case 1:
error ("initializer fails to determine size of %q+D", decl);
break;
case 2:
if (do_default)
error ("array size missing in %q+D", decl);
/* If a `static' var's size isn't known,
make it extern as well as static, so it does not get
allocated.
If it is not `static', then do not mark extern;
finish_incomplete_decl will give it a default size
and it will get allocated. */
else if (!pedantic && TREE_STATIC (decl) && !TREE_PUBLIC (decl))
DECL_EXTERNAL (decl) = 1;
break;
case 3:
error ("zero or negative size array %q+D", decl);
break;
case 0:
/* For global variables, update the copy of the type that
exists in the binding. */
if (TREE_PUBLIC (decl))
{
struct c_binding *b_ext = I_SYMBOL_BINDING (DECL_NAME (decl));
while (b_ext && !B_IN_EXTERNAL_SCOPE (b_ext))
b_ext = b_ext->shadowed;
if (b_ext && TREE_CODE (decl) == TREE_CODE (b_ext->decl))
{
if (b_ext->u.type && comptypes (b_ext->u.type, type))
b_ext->u.type = composite_type (b_ext->u.type, type);
else
b_ext->u.type = type;
}
}
break;
default:
gcc_unreachable ();
}
if (DECL_INITIAL (decl))
TREE_TYPE (DECL_INITIAL (decl)) = type;
relayout_decl (decl);
}
if (VAR_P (decl))
{
if (init && TREE_CODE (init) == CONSTRUCTOR)
add_flexible_array_elts_to_size (decl, init);
if (DECL_SIZE (decl) == NULL_TREE && TREE_TYPE (decl) != error_mark_node
&& COMPLETE_TYPE_P (TREE_TYPE (decl)))
layout_decl (decl, 0);
if (DECL_SIZE (decl) == NULL_TREE
/* Don't give an error if we already gave one earlier. */
&& TREE_TYPE (decl) != error_mark_node
&& (TREE_STATIC (decl)
/* A static variable with an incomplete type
is an error if it is initialized.
Also if it is not file scope.
Otherwise, let it through, but if it is not `extern'
then it may cause an error message later. */
? (DECL_INITIAL (decl) != NULL_TREE
|| !DECL_FILE_SCOPE_P (decl))
/* An automatic variable with an incomplete type
is an error. */
: !DECL_EXTERNAL (decl)))
{
error ("storage size of %q+D isn%'t known", decl);
TREE_TYPE (decl) = error_mark_node;
}
if ((RECORD_OR_UNION_TYPE_P (TREE_TYPE (decl))
|| TREE_CODE (TREE_TYPE (decl)) == ENUMERAL_TYPE)
&& DECL_SIZE (decl) == NULL_TREE
&& TREE_STATIC (decl))
incomplete_record_decls.safe_push (decl);
if (is_global_var (decl) && DECL_SIZE (decl) != NULL_TREE)
{
if (TREE_CODE (DECL_SIZE (decl)) == INTEGER_CST)
constant_expression_warning (DECL_SIZE (decl));
else
{
error ("storage size of %q+D isn%'t constant", decl);
TREE_TYPE (decl) = error_mark_node;
}
}
if (TREE_USED (type))
{
TREE_USED (decl) = 1;
DECL_READ_P (decl) = 1;
}
}
/* If this is a function and an assembler name is specified, reset DECL_RTL
so we can give it its new name. Also, update builtin_decl if it
was a normal built-in. */
if (TREE_CODE (decl) == FUNCTION_DECL && asmspec)
{
if (DECL_BUILT_IN_CLASS (decl) == BUILT_IN_NORMAL)
set_builtin_user_assembler_name (decl, asmspec);
set_user_assembler_name (decl, asmspec);
}
/* If #pragma weak was used, mark the decl weak now. */
maybe_apply_pragma_weak (decl);
/* Output the assembler code and/or RTL code for variables and functions,
unless the type is an undefined structure or union.
If not, it will get done when the type is completed. */
if (VAR_OR_FUNCTION_DECL_P (decl))
{
/* Determine the ELF visibility. */
if (TREE_PUBLIC (decl))
c_determine_visibility (decl);
/* This is a no-op in c-lang.c or something real in objc-act.c. */
if (c_dialect_objc ())
objc_check_decl (decl);
if (asmspec)
{
/* If this is not a static variable, issue a warning.
It doesn't make any sense to give an ASMSPEC for an
ordinary, non-register local variable. Historically,
GCC has accepted -- but ignored -- the ASMSPEC in
this case. */
if (!DECL_FILE_SCOPE_P (decl)
&& VAR_P (decl)
&& !C_DECL_REGISTER (decl)
&& !TREE_STATIC (decl))
warning (0, "ignoring asm-specifier for non-static local "
"variable %q+D", decl);
else
set_user_assembler_name (decl, asmspec);
}
if (DECL_FILE_SCOPE_P (decl))
{
if (DECL_INITIAL (decl) == NULL_TREE
|| DECL_INITIAL (decl) == error_mark_node)
/* Don't output anything
when a tentative file-scope definition is seen.
But at end of compilation, do output code for them. */
DECL_DEFER_OUTPUT (decl) = 1;
if (asmspec && VAR_P (decl) && C_DECL_REGISTER (decl))
DECL_HARD_REGISTER (decl) = 1;
rest_of_decl_compilation (decl, true, 0);
}
else
{
/* In conjunction with an ASMSPEC, the `register'
keyword indicates that we should place the variable
in a particular register. */
if (asmspec && C_DECL_REGISTER (decl))
{
DECL_HARD_REGISTER (decl) = 1;
/* This cannot be done for a structure with volatile
fields, on which DECL_REGISTER will have been
reset. */
if (!DECL_REGISTER (decl))
error ("cannot put object with volatile field into register");
}
if (TREE_CODE (decl) != FUNCTION_DECL)
{
/* If we're building a variable sized type, and we might be
reachable other than via the top of the current binding
level, then create a new BIND_EXPR so that we deallocate
the object at the right time. */
/* Note that DECL_SIZE can be null due to errors. */
if (DECL_SIZE (decl)
&& !TREE_CONSTANT (DECL_SIZE (decl))
&& STATEMENT_LIST_HAS_LABEL (cur_stmt_list))
{
tree bind;
bind = build3 (BIND_EXPR, void_type_node, NULL, NULL, NULL);
TREE_SIDE_EFFECTS (bind) = 1;
add_stmt (bind);
BIND_EXPR_BODY (bind) = push_stmt_list ();
}
add_stmt (build_stmt (DECL_SOURCE_LOCATION (decl),
DECL_EXPR, decl));
}
}
if (!DECL_FILE_SCOPE_P (decl))
{
/* Recompute the RTL of a local array now
if it used to be an incomplete type. */
if (was_incomplete && !is_global_var (decl))
{
/* If we used it already as memory, it must stay in memory. */
TREE_ADDRESSABLE (decl) = TREE_USED (decl);
/* If it's still incomplete now, no init will save it. */
if (DECL_SIZE (decl) == NULL_TREE)
DECL_INITIAL (decl) = NULL_TREE;
}
}
}
if (TREE_CODE (decl) == TYPE_DECL)
{
if (!DECL_FILE_SCOPE_P (decl)
&& variably_modified_type_p (TREE_TYPE (decl), NULL_TREE))
add_stmt (build_stmt (DECL_SOURCE_LOCATION (decl), DECL_EXPR, decl));
rest_of_decl_compilation (decl, DECL_FILE_SCOPE_P (decl), 0);
}
/* Install a cleanup (aka destructor) if one was given. */
if (VAR_P (decl) && !TREE_STATIC (decl))
{
tree attr = lookup_attribute ("cleanup", DECL_ATTRIBUTES (decl));
if (attr)
{
tree cleanup_id = TREE_VALUE (TREE_VALUE (attr));
tree cleanup_decl = lookup_name (cleanup_id);
tree cleanup;
vec<tree, va_gc> *v;
/* Build "cleanup(&decl)" for the destructor. */
cleanup = build_unary_op (input_location, ADDR_EXPR, decl, false);
vec_alloc (v, 1);
v->quick_push (cleanup);
cleanup = c_build_function_call_vec (DECL_SOURCE_LOCATION (decl),
vNULL, cleanup_decl, v, NULL);
vec_free (v);
/* Don't warn about decl unused; the cleanup uses it. */
TREE_USED (decl) = 1;
TREE_USED (cleanup_decl) = 1;
DECL_READ_P (decl) = 1;
push_cleanup (decl, cleanup, false);
}
}
if (warn_cxx_compat
&& VAR_P (decl)
&& !DECL_EXTERNAL (decl)
&& DECL_INITIAL (decl) == NULL_TREE)
{
type = strip_array_types (type);
if (TREE_READONLY (decl))
warning_at (DECL_SOURCE_LOCATION (decl), OPT_Wc___compat,
"uninitialized const %qD is invalid in C++", decl);
else if (RECORD_OR_UNION_TYPE_P (type)
&& C_TYPE_FIELDS_READONLY (type))
diagnose_uninitialized_cst_member (decl, type);
}
if (flag_openmp
&& VAR_P (decl)
&& lookup_attribute ("omp declare target implicit",
DECL_ATTRIBUTES (decl)))
{
DECL_ATTRIBUTES (decl)
= remove_attribute ("omp declare target implicit",
DECL_ATTRIBUTES (decl));
if (!lang_hooks.types.omp_mappable_type (TREE_TYPE (decl)))
error ("%q+D in declare target directive does not have mappable type",
decl);
else if (!lookup_attribute ("omp declare target",
DECL_ATTRIBUTES (decl))
&& !lookup_attribute ("omp declare target link",
DECL_ATTRIBUTES (decl)))
DECL_ATTRIBUTES (decl)
= tree_cons (get_identifier ("omp declare target"),
NULL_TREE, DECL_ATTRIBUTES (decl));
}
invoke_plugin_callbacks (PLUGIN_FINISH_DECL, decl);
}
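/* Illustrative example, not part of the original source:

       int digits[] = { 0, 1, 2 };

   reaches finish_decl with an incomplete array type; the
   complete_array_type call above deduces int[3] from the
   initializer, the external binding's copy of the type is updated
   for public declarations, and relayout_decl computes the final
   size. */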
/* Given a parsed parameter declaration, decode it into a PARM_DECL.
EXPR is NULL or a pointer to an expression that needs to be
evaluated for the side effects of array size expressions in the
parameters. */
tree
grokparm (const struct c_parm *parm, tree *expr)
{
tree attrs = parm->attrs;
tree decl = grokdeclarator (parm->declarator, parm->specs, PARM, false,
NULL, &attrs, expr, NULL, DEPRECATED_NORMAL);
decl_attributes (&decl, attrs, 0);
return decl;
}
/* Given a parsed parameter declaration, decode it into a PARM_DECL
and push that on the current scope. EXPR is a pointer to an
expression that needs to be evaluated for the side effects of array
size expressions in the parameters. */
void
push_parm_decl (const struct c_parm *parm, tree *expr)
{
tree attrs = parm->attrs;
tree decl;
decl = grokdeclarator (parm->declarator, parm->specs, PARM, false, NULL,
&attrs, expr, NULL, DEPRECATED_NORMAL);
if (decl && DECL_P (decl))
DECL_SOURCE_LOCATION (decl) = parm->loc;
decl_attributes (&decl, attrs, 0);
decl = pushdecl (decl);
finish_decl (decl, input_location, NULL_TREE, NULL_TREE, NULL_TREE);
}
/* Mark all the parameter declarations to date as forward decls.
Also diagnose use of this extension. */
void
mark_forward_parm_decls (void)
{
struct c_binding *b;
if (pedantic && !current_scope->warned_forward_parm_decls)
{
pedwarn (input_location, OPT_Wpedantic,
"ISO C forbids forward parameter declarations");
current_scope->warned_forward_parm_decls = true;
}
for (b = current_scope->bindings; b; b = b->prev)
if (TREE_CODE (b->decl) == PARM_DECL)
TREE_ASM_WRITTEN (b->decl) = 1;
}
/* Build a COMPOUND_LITERAL_EXPR. TYPE is the type given in the compound
literal, which may be an incomplete array type completed by the
initializer; INIT is a CONSTRUCTOR at LOC that initializes the compound
literal. NON_CONST is true if the initializers contain something
that cannot occur in a constant expression. If ALIGNAS_ALIGN is nonzero,
it is the (valid) alignment for this compound literal, as specified
with _Alignas. */
tree
build_compound_literal (location_t loc, tree type, tree init, bool non_const,
unsigned int alignas_align)
{
/* We do not use start_decl here because we have a type, not a declarator;
and do not use finish_decl because the decl should be stored inside
the COMPOUND_LITERAL_EXPR rather than added elsewhere as a DECL_EXPR. */
tree decl;
tree complit;
tree stmt;
if (type == error_mark_node
|| init == error_mark_node)
return error_mark_node;
decl = build_decl (loc, VAR_DECL, NULL_TREE, type);
DECL_EXTERNAL (decl) = 0;
TREE_PUBLIC (decl) = 0;
TREE_STATIC (decl) = (current_scope == file_scope);
DECL_CONTEXT (decl) = current_function_decl;
TREE_USED (decl) = 1;
DECL_READ_P (decl) = 1;
DECL_ARTIFICIAL (decl) = 1;
DECL_IGNORED_P (decl) = 1;
TREE_TYPE (decl) = type;
c_apply_type_quals_to_decl (TYPE_QUALS (strip_array_types (type)), decl);
if (alignas_align)
{
SET_DECL_ALIGN (decl, alignas_align * BITS_PER_UNIT);
DECL_USER_ALIGN (decl) = 1;
}
store_init_value (loc, decl, init, NULL_TREE);
if (TREE_CODE (type) == ARRAY_TYPE && !COMPLETE_TYPE_P (type))
{
int failure = complete_array_type (&TREE_TYPE (decl),
DECL_INITIAL (decl), true);
/* If complete_array_type returns 3, it means that the
initial value of the compound literal is empty. Allow it. */
gcc_assert (failure == 0 || failure == 3);
type = TREE_TYPE (decl);
TREE_TYPE (DECL_INITIAL (decl)) = type;
}
if (type == error_mark_node || !COMPLETE_TYPE_P (type))
{
c_incomplete_type_error (loc, NULL_TREE, type);
return error_mark_node;
}
stmt = build_stmt (DECL_SOURCE_LOCATION (decl), DECL_EXPR, decl);
complit = build1 (COMPOUND_LITERAL_EXPR, type, stmt);
TREE_SIDE_EFFECTS (complit) = 1;
layout_decl (decl, 0);
if (TREE_STATIC (decl))
{
/* This decl needs a name for the assembler output. */
set_compound_literal_name (decl);
DECL_DEFER_OUTPUT (decl) = 1;
DECL_COMDAT (decl) = 1;
pushdecl (decl);
rest_of_decl_compilation (decl, 1, 0);
}
if (non_const)
{
complit = build2 (C_MAYBE_CONST_EXPR, type, NULL, complit);
C_MAYBE_CONST_EXPR_NON_CONST (complit) = 1;
}
return complit;
}
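/* Illustrative example, not part of the original source:

       int *p = (int []){ 2, 4 };

   builds a COMPOUND_LITERAL_EXPR wrapping an anonymous, artificial
   VAR_DECL; the incomplete type int[] is completed to int[2] from
   the initializer, and at file scope the decl is made static and
   given an assembler name via set_compound_literal_name. */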
/* Check the type of a compound literal. Here we just check that it
is valid for C++. */
void
check_compound_literal_type (location_t loc, struct c_type_name *type_name)
{
if (warn_cxx_compat
&& (type_name->specs->typespec_kind == ctsk_tagdef
|| type_name->specs->typespec_kind == ctsk_tagfirstref))
warning_at (loc, OPT_Wc___compat,
"defining a type in a compound literal is invalid in C++");
}
/* Determine whether TYPE is a structure with a flexible array member,
or a union containing such a structure (possibly recursively). */
static bool
flexible_array_type_p (tree type)
{
tree x;
switch (TREE_CODE (type))
{
case RECORD_TYPE:
x = TYPE_FIELDS (type);
if (x == NULL_TREE)
return false;
while (DECL_CHAIN (x) != NULL_TREE)
x = DECL_CHAIN (x);
if (TREE_CODE (TREE_TYPE (x)) == ARRAY_TYPE
&& TYPE_SIZE (TREE_TYPE (x)) == NULL_TREE
&& TYPE_DOMAIN (TREE_TYPE (x)) != NULL_TREE
&& TYPE_MAX_VALUE (TYPE_DOMAIN (TREE_TYPE (x))) == NULL_TREE)
return true;
return false;
case UNION_TYPE:
for (x = TYPE_FIELDS (type); x != NULL_TREE; x = DECL_CHAIN (x))
{
if (flexible_array_type_p (TREE_TYPE (x)))
return true;
}
return false;
default:
return false;
}
}
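/* Illustrative example, not part of the original source:

       struct msg { int len; char data[]; };
       union any { struct msg m; int i; };

   flexible_array_type_p returns true for "struct msg", whose last
   field is an incomplete array type with a domain but no upper
   bound, and therefore also for "union any", found recursively. */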
/* Performs sanity checks on the TYPE and WIDTH of the bit-field NAME,
replacing with appropriate values if they are invalid. */
static void
check_bitfield_type_and_width (location_t loc, tree *type, tree *width,
tree orig_name)
{
tree type_mv;
unsigned int max_width;
unsigned HOST_WIDE_INT w;
const char *name = (orig_name
? identifier_to_locale (IDENTIFIER_POINTER (orig_name))
: _("<anonymous>"));
/* Detect and ignore out of range field width and process valid
field widths. */
if (!INTEGRAL_TYPE_P (TREE_TYPE (*width)))
{
error_at (loc, "bit-field %qs width not an integer constant", name);
*width = integer_one_node;
}
else
{
if (TREE_CODE (*width) != INTEGER_CST)
{
*width = c_fully_fold (*width, false, NULL);
if (TREE_CODE (*width) == INTEGER_CST)
pedwarn (loc, OPT_Wpedantic,
"bit-field %qs width not an integer constant expression",
name);
}
if (TREE_CODE (*width) != INTEGER_CST)
{
error_at (loc, "bit-field %qs width not an integer constant", name);
*width = integer_one_node;
}
constant_expression_warning (*width);
if (tree_int_cst_sgn (*width) < 0)
{
error_at (loc, "negative width in bit-field %qs", name);
*width = integer_one_node;
}
else if (integer_zerop (*width) && orig_name)
{
error_at (loc, "zero width for bit-field %qs", name);
*width = integer_one_node;
}
}
/* Detect invalid bit-field type. */
if (TREE_CODE (*type) != INTEGER_TYPE
&& TREE_CODE (*type) != BOOLEAN_TYPE
&& TREE_CODE (*type) != ENUMERAL_TYPE)
{
error_at (loc, "bit-field %qs has invalid type", name);
*type = unsigned_type_node;
}
if (TYPE_WARN_IF_NOT_ALIGN (*type))
{
error_at (loc, "cannot declare bit-field %qs with %<warn_if_not_aligned%> type",
name);
*type = unsigned_type_node;
}
type_mv = TYPE_MAIN_VARIANT (*type);
if (!in_system_header_at (input_location)
&& type_mv != integer_type_node
&& type_mv != unsigned_type_node
&& type_mv != boolean_type_node)
pedwarn_c90 (loc, OPT_Wpedantic,
"type of bit-field %qs is a GCC extension", name);
max_width = TYPE_PRECISION (*type);
if (compare_tree_int (*width, max_width) > 0)
{
error_at (loc, "width of %qs exceeds its type", name);
w = max_width;
*width = build_int_cst (integer_type_node, w);
}
else
w = tree_to_uhwi (*width);
if (TREE_CODE (*type) == ENUMERAL_TYPE)
{
struct lang_type *lt = TYPE_LANG_SPECIFIC (*type);
if (!lt
|| w < tree_int_cst_min_precision (lt->enum_min, TYPE_SIGN (*type))
|| w < tree_int_cst_min_precision (lt->enum_max, TYPE_SIGN (*type)))
warning_at (loc, 0, "%qs is narrower than values of its type", name);
}
}
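/* Illustration (hypothetical example user code, not part of GCC):
   declarations the checks above diagnose, and the repaired values that
   let parsing continue:

     struct s {
       int a : -1;     // error: negative width; width reset to 1
       int b : 0;      // error: zero width for named bit-field
       int c : 40;     // error if int has 32 bits: width exceeds type
       double d : 3;   // error: invalid type; type reset to unsigned
       long e : 2;     // pedwarned in C90 mode: type is a GCC extension
     };
*/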
/* Print warning about variable length array if necessary. */
static void
warn_variable_length_array (tree name, tree size)
{
if (TREE_CONSTANT (size))
{
if (name)
pedwarn_c90 (input_location, OPT_Wvla,
"ISO C90 forbids array %qE whose size "
"can%'t be evaluated", name);
else
pedwarn_c90 (input_location, OPT_Wvla, "ISO C90 forbids array "
"whose size can%'t be evaluated");
}
else
{
if (name)
pedwarn_c90 (input_location, OPT_Wvla,
"ISO C90 forbids variable length array %qE", name);
else
pedwarn_c90 (input_location, OPT_Wvla, "ISO C90 forbids variable "
"length array");
}
}
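/* Illustration (hypothetical example user code, not part of GCC): the
   two C90 -Wvla messages above:

     void f (int n)
     {
       int a[n];        // "ISO C90 forbids variable length array"
       int b[(0, 8)];   // folds to a constant, but a comma expression
                        //   is not a constant expression, so the
                        //   "size can't be evaluated" branch fires
     }
*/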
/* Print warning about defaulting to int if necessary. */
static void
warn_defaults_to (location_t location, int opt, const char *gmsgid, ...)
{
diagnostic_info diagnostic;
va_list ap;
rich_location richloc (line_table, location);
va_start (ap, gmsgid);
diagnostic_set_info (&diagnostic, gmsgid, &ap, &richloc,
flag_isoc99 ? DK_PEDWARN : DK_WARNING);
diagnostic.option_index = opt;
diagnostic_report_diagnostic (global_dc, &diagnostic);
va_end (ap);
}
/* Returns the smallest location != UNKNOWN_LOCATION in LOCATIONS,
considering only those c_declspec_words found in LIST, which
must be terminated by cdw_number_of_elements. */
static location_t
smallest_type_quals_location (const location_t *locations,
const c_declspec_word *list)
{
location_t loc = UNKNOWN_LOCATION;
while (*list != cdw_number_of_elements)
{
location_t newloc = locations[*list];
if (loc == UNKNOWN_LOCATION
|| (newloc != UNKNOWN_LOCATION && newloc < loc))
loc = newloc;
list++;
}
return loc;
}
/* Given declspecs and a declarator,
determine the name and type of the object declared
and construct a ..._DECL node for it.
(In one case we can return a ..._TYPE node instead.
For invalid input we sometimes return NULL_TREE.)
DECLSPECS is a c_declspecs structure for the declaration specifiers.
DECL_CONTEXT says which syntactic context this declaration is in:
NORMAL for most contexts. Make a VAR_DECL or FUNCTION_DECL or TYPE_DECL.
FUNCDEF for a function definition. Like NORMAL but a few different
error messages in each case. Return value may be zero meaning
this definition is too screwy to try to parse.
PARM for a parameter declaration (either within a function prototype
or before a function body). Make a PARM_DECL, or return void_type_node.
TYPENAME if for a typename (in a cast or sizeof).
Don't make a DECL node; just return the ..._TYPE node.
FIELD for a struct or union field; make a FIELD_DECL.
INITIALIZED is true if the decl has an initializer.
WIDTH is non-NULL for bit-fields, and is a pointer to an INTEGER_CST node
representing the width of the bit-field.
DECL_ATTRS points to the list of attributes that should be added to this
decl. Any nested attributes that belong on the decl itself will be
added to this list.
If EXPR is not NULL, any expressions that need to be evaluated as
part of evaluating variably modified types will be stored in *EXPR.
If EXPR_CONST_OPERANDS is not NULL, *EXPR_CONST_OPERANDS will be
set to indicate whether operands in *EXPR can be used in constant
expressions.
DEPRECATED_STATE is a deprecated_states value indicating whether
deprecation warnings should be suppressed.
In the TYPENAME case, DECLARATOR is really an absolute declarator.
It may also be so in the PARM case, for a prototype where the
argument type is specified but not the name.
This function is where the complicated C meanings of `static'
and `extern' are interpreted. */
static tree
grokdeclarator (const struct c_declarator *declarator,
struct c_declspecs *declspecs,
enum decl_context decl_context, bool initialized, tree *width,
tree *decl_attrs, tree *expr, bool *expr_const_operands,
enum deprecated_states deprecated_state)
{
tree type = declspecs->type;
bool threadp = declspecs->thread_p;
enum c_storage_class storage_class = declspecs->storage_class;
int constp;
int restrictp;
int volatilep;
int atomicp;
int type_quals = TYPE_UNQUALIFIED;
tree name = NULL_TREE;
bool funcdef_flag = false;
bool funcdef_syntax = false;
bool size_varies = false;
tree decl_attr = declspecs->decl_attr;
int array_ptr_quals = TYPE_UNQUALIFIED;
tree array_ptr_attrs = NULL_TREE;
bool array_parm_static = false;
bool array_parm_vla_unspec_p = false;
tree returned_attrs = NULL_TREE;
bool bitfield = width != NULL;
tree element_type;
tree orig_qual_type = NULL;
size_t orig_qual_indirect = 0;
struct c_arg_info *arg_info = 0;
addr_space_t as1, as2, address_space;
location_t loc = UNKNOWN_LOCATION;
tree expr_dummy;
bool expr_const_operands_dummy;
enum c_declarator_kind first_non_attr_kind;
unsigned int alignas_align = 0;
if (TREE_CODE (type) == ERROR_MARK)
return error_mark_node;
if (expr == NULL)
{
expr = &expr_dummy;
expr_dummy = NULL_TREE;
}
if (expr_const_operands == NULL)
expr_const_operands = &expr_const_operands_dummy;
if (declspecs->expr)
{
if (*expr)
*expr = build2 (COMPOUND_EXPR, TREE_TYPE (declspecs->expr), *expr,
declspecs->expr);
else
*expr = declspecs->expr;
}
*expr_const_operands = declspecs->expr_const_operands;
if (decl_context == FUNCDEF)
funcdef_flag = true, decl_context = NORMAL;
/* Look inside a declarator for the name being declared
and get it as an IDENTIFIER_NODE, for an error message. */
{
const struct c_declarator *decl = declarator;
first_non_attr_kind = cdk_attrs;
while (decl)
switch (decl->kind)
{
case cdk_array:
loc = decl->id_loc;
/* FALL THRU. */
case cdk_function:
case cdk_pointer:
funcdef_syntax = (decl->kind == cdk_function);
if (first_non_attr_kind == cdk_attrs)
first_non_attr_kind = decl->kind;
decl = decl->declarator;
break;
case cdk_attrs:
decl = decl->declarator;
break;
case cdk_id:
loc = decl->id_loc;
if (decl->u.id)
name = decl->u.id;
if (first_non_attr_kind == cdk_attrs)
first_non_attr_kind = decl->kind;
decl = 0;
break;
default:
gcc_unreachable ();
}
if (name == NULL_TREE)
{
gcc_assert (decl_context == PARM
|| decl_context == TYPENAME
|| (decl_context == FIELD
&& declarator->kind == cdk_id));
gcc_assert (!initialized);
}
}
/* A function definition's declarator must have the form of
a function declarator. */
if (funcdef_flag && !funcdef_syntax)
return NULL_TREE;
/* If this looks like a function definition, make it one,
even if it occurs where parms are expected.
Then store_parm_decls will reject it and not use it as a parm. */
if (decl_context == NORMAL && !funcdef_flag && current_scope->parm_flag)
decl_context = PARM;
if (declspecs->deprecated_p && deprecated_state != DEPRECATED_SUPPRESS)
warn_deprecated_use (declspecs->type, declspecs->decl_attr);
if ((decl_context == NORMAL || decl_context == FIELD)
&& current_scope == file_scope
&& variably_modified_type_p (type, NULL_TREE))
{
if (name)
error_at (loc, "variably modified %qE at file scope", name);
else
error_at (loc, "variably modified field at file scope");
type = integer_type_node;
}
size_varies = C_TYPE_VARIABLE_SIZE (type) != 0;
/* Diagnose defaulting to "int". */
if (declspecs->default_int_p && !in_system_header_at (input_location))
{
/* Issue a warning if this is an ISO C 99 program or if
-Wreturn-type and this is a function, or if -Wimplicit;
prefer the former warning since it is more explicit. */
if ((warn_implicit_int || warn_return_type || flag_isoc99)
&& funcdef_flag)
warn_about_return_type = 1;
else
{
if (name)
warn_defaults_to (loc, OPT_Wimplicit_int,
"type defaults to %<int%> in declaration "
"of %qE", name);
else
warn_defaults_to (loc, OPT_Wimplicit_int,
"type defaults to %<int%> in type name");
}
}
/* Adjust the type if a bit-field is being declared,
-funsigned-bitfields applied and the type is not explicitly
"signed". */
if (bitfield && !flag_signed_bitfields && !declspecs->explicit_signed_p
&& TREE_CODE (type) == INTEGER_TYPE)
type = unsigned_type_for (type);
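/* Illustration (hypothetical example user code, not part of GCC): with
   -funsigned-bitfields and no explicit "signed", the plain-int member
   below becomes unsigned, so reading back 3 yields 3 (with the default
   signed bit-fields GCC would yield -1):

     struct s { int b : 2; };
     struct s x = { 3 };
*/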
/* Figure out the type qualifiers for the declaration. There are
two ways a declaration can become qualified. One is something
like `const int i' where the `const' is explicit. Another is
something like `typedef const int CI; CI i' where the type of the
declaration contains the `const'. A third possibility is that
there is a type qualifier on the element type of a typedefed
array type, in which case we should extract that qualifier so
that c_apply_type_quals_to_decl receives the full list of
qualifiers to work with (C90 is not entirely clear about whether
duplicate qualifiers should be diagnosed in this case, but it
seems most appropriate to do so). */
element_type = strip_array_types (type);
constp = declspecs->const_p + TYPE_READONLY (element_type);
restrictp = declspecs->restrict_p + TYPE_RESTRICT (element_type);
volatilep = declspecs->volatile_p + TYPE_VOLATILE (element_type);
atomicp = declspecs->atomic_p + TYPE_ATOMIC (element_type);
as1 = declspecs->address_space;
as2 = TYPE_ADDR_SPACE (element_type);
address_space = ADDR_SPACE_GENERIC_P (as1) ? as2 : as1;
if (constp > 1)
pedwarn_c90 (loc, OPT_Wpedantic, "duplicate %<const%>");
if (restrictp > 1)
pedwarn_c90 (loc, OPT_Wpedantic, "duplicate %<restrict%>");
if (volatilep > 1)
pedwarn_c90 (loc, OPT_Wpedantic, "duplicate %<volatile%>");
if (atomicp > 1)
pedwarn_c90 (loc, OPT_Wpedantic, "duplicate %<_Atomic%>");
if (!ADDR_SPACE_GENERIC_P (as1) && !ADDR_SPACE_GENERIC_P (as2) && as1 != as2)
error_at (loc, "conflicting named address spaces (%s vs %s)",
c_addr_space_name (as1), c_addr_space_name (as2));
if ((TREE_CODE (type) == ARRAY_TYPE
|| first_non_attr_kind == cdk_array)
&& TYPE_QUALS (element_type))
{
orig_qual_type = type;
type = TYPE_MAIN_VARIANT (type);
}
type_quals = ((constp ? TYPE_QUAL_CONST : 0)
| (restrictp ? TYPE_QUAL_RESTRICT : 0)
| (volatilep ? TYPE_QUAL_VOLATILE : 0)
| (atomicp ? TYPE_QUAL_ATOMIC : 0)
| ENCODE_QUAL_ADDR_SPACE (address_space));
if (type_quals != TYPE_QUALS (element_type))
orig_qual_type = NULL_TREE;
/* Applying the _Atomic qualifier to an array type (through the use
of typedefs or typeof) must be detected here. If the qualifier
is introduced later, any appearance of applying it to an array is
actually applying it to an element of that array. */
if (declspecs->atomic_p && TREE_CODE (type) == ARRAY_TYPE)
error_at (loc, "%<_Atomic%>-qualified array type");
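/* Illustration (hypothetical example user code, not part of GCC): the
   only way to reach the error above is through a typedef or typeof,
   since a directly written qualifier applies to the element type:

     typedef int A[3];
     _Atomic A x;        // error: _Atomic-qualified array type
     _Atomic int y[3];   // OK: array of _Atomic int
*/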
/* Warn about storage classes that are invalid for certain
kinds of declarations (parameters, typenames, etc.). */
if (funcdef_flag
&& (threadp
|| storage_class == csc_auto
|| storage_class == csc_register
|| storage_class == csc_typedef))
{
if (storage_class == csc_auto)
pedwarn (loc,
(current_scope == file_scope) ? 0 : OPT_Wpedantic,
"function definition declared %<auto%>");
if (storage_class == csc_register)
error_at (loc, "function definition declared %<register%>");
if (storage_class == csc_typedef)
error_at (loc, "function definition declared %<typedef%>");
if (threadp)
error_at (loc, "function definition declared %qs",
declspecs->thread_gnu_p ? "__thread" : "_Thread_local");
threadp = false;
if (storage_class == csc_auto
|| storage_class == csc_register
|| storage_class == csc_typedef)
storage_class = csc_none;
}
else if (decl_context != NORMAL && (storage_class != csc_none || threadp))
{
if (decl_context == PARM && storage_class == csc_register)
;
else
{
switch (decl_context)
{
case FIELD:
if (name)
error_at (loc, "storage class specified for structure "
"field %qE", name);
else
error_at (loc, "storage class specified for structure field");
break;
case PARM:
if (name)
error_at (loc, "storage class specified for parameter %qE",
name);
else
error_at (loc, "storage class specified for unnamed parameter");
break;
default:
error_at (loc, "storage class specified for typename");
break;
}
storage_class = csc_none;
threadp = false;
}
}
else if (storage_class == csc_extern
&& initialized
&& !funcdef_flag)
{
/* 'extern' with initialization is invalid if not at file scope. */
if (current_scope == file_scope)
{
/* It is fine to have 'extern const' when compiling at C
and C++ intersection. */
if (!(warn_cxx_compat && constp))
warning_at (loc, 0, "%qE initialized and declared %<extern%>",
name);
}
else
error_at (loc, "%qE has both %<extern%> and initializer", name);
}
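/* Illustration (hypothetical example user code, not part of GCC):

     extern int i = 1;                      // file scope: warning only
     void f (void) { extern int j = 2; }   // block scope: error
*/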
else if (current_scope == file_scope)
{
if (storage_class == csc_auto)
error_at (loc, "file-scope declaration of %qE specifies %<auto%>",
name);
if (pedantic && storage_class == csc_register)
pedwarn (input_location, OPT_Wpedantic,
"file-scope declaration of %qE specifies %<register%>", name);
}
else
{
if (storage_class == csc_extern && funcdef_flag)
error_at (loc, "nested function %qE declared %<extern%>", name);
else if (threadp && storage_class == csc_none)
{
error_at (loc, "function-scope %qE implicitly auto and declared "
"%qs", name,
declspecs->thread_gnu_p ? "__thread" : "_Thread_local");
threadp = false;
}
}
/* Now figure out the structure of the declarator proper.
Descend through it, creating more complex types, until we reach
the declared identifier (or NULL_TREE, in an absolute declarator).
At each stage we maintain an unqualified version of the type
together with any qualifiers that should be applied to it with
c_build_qualified_type; this way, array types including
multidimensional array types are first built up in unqualified
form and then the qualified form is created with
TYPE_MAIN_VARIANT pointing to the unqualified form. */
while (declarator && declarator->kind != cdk_id)
{
if (type == error_mark_node)
{
declarator = declarator->declarator;
continue;
}
/* Each level of DECLARATOR is either a cdk_array (for ...[..]),
a cdk_pointer (for *...),
a cdk_function (for ...(...)),
a cdk_attrs (for nested attributes),
or a cdk_id (for the name being declared
or the place in an absolute declarator
where the name was omitted).
For the last case, we have just exited the loop.
At this point, TYPE is the type of elements of an array,
or for a function to return, or for a pointer to point to.
After this sequence of ifs, TYPE is the type of the
array or function or pointer, and DECLARATOR has had its
outermost layer removed. */
if (array_ptr_quals != TYPE_UNQUALIFIED
|| array_ptr_attrs != NULL_TREE
|| array_parm_static)
{
/* Only the innermost declarator (making a parameter be of
array type which is converted to pointer type)
may have static or type qualifiers. */
error_at (loc, "static or type qualifiers in non-parameter array declarator");
array_ptr_quals = TYPE_UNQUALIFIED;
array_ptr_attrs = NULL_TREE;
array_parm_static = false;
}
switch (declarator->kind)
{
case cdk_attrs:
{
/* A declarator with embedded attributes. */
tree attrs = declarator->u.attrs;
const struct c_declarator *inner_decl;
int attr_flags = 0;
declarator = declarator->declarator;
inner_decl = declarator;
while (inner_decl->kind == cdk_attrs)
inner_decl = inner_decl->declarator;
if (inner_decl->kind == cdk_id)
attr_flags |= (int) ATTR_FLAG_DECL_NEXT;
else if (inner_decl->kind == cdk_function)
attr_flags |= (int) ATTR_FLAG_FUNCTION_NEXT;
else if (inner_decl->kind == cdk_array)
attr_flags |= (int) ATTR_FLAG_ARRAY_NEXT;
returned_attrs = decl_attributes (&type,
chainon (returned_attrs, attrs),
attr_flags);
break;
}
case cdk_array:
{
tree itype = NULL_TREE;
tree size = declarator->u.array.dimen;
/* The index is a signed object `sizetype' bits wide. */
tree index_type = c_common_signed_type (sizetype);
array_ptr_quals = declarator->u.array.quals;
array_ptr_attrs = declarator->u.array.attrs;
array_parm_static = declarator->u.array.static_p;
array_parm_vla_unspec_p = declarator->u.array.vla_unspec_p;
declarator = declarator->declarator;
/* Check for some types that there cannot be arrays of. */
if (VOID_TYPE_P (type))
{
if (name)
error_at (loc, "declaration of %qE as array of voids", name);
else
error_at (loc, "declaration of type name as array of voids");
type = error_mark_node;
}
if (TREE_CODE (type) == FUNCTION_TYPE)
{
if (name)
error_at (loc, "declaration of %qE as array of functions",
name);
else
error_at (loc, "declaration of type name as array of "
"functions");
type = error_mark_node;
}
if (pedantic && !in_system_header_at (input_location)
&& flexible_array_type_p (type))
pedwarn (loc, OPT_Wpedantic,
"invalid use of structure with flexible array member");
if (size == error_mark_node)
type = error_mark_node;
if (type == error_mark_node)
continue;
/* If size was specified, set ITYPE to a range-type for
that size. Otherwise, ITYPE remains null. finish_decl
may figure it out from an initial value. */
if (size)
{
bool size_maybe_const = true;
bool size_int_const = (TREE_CODE (size) == INTEGER_CST
&& !TREE_OVERFLOW (size));
bool this_size_varies = false;
/* Strip NON_LVALUE_EXPRs since we aren't using as an
lvalue. */
STRIP_TYPE_NOPS (size);
if (!INTEGRAL_TYPE_P (TREE_TYPE (size)))
{
if (name)
error_at (loc, "size of array %qE has non-integer type",
name);
else
error_at (loc,
"size of unnamed array has non-integer type");
size = integer_one_node;
}
/* This can happen with enum forward declaration. */
else if (!COMPLETE_TYPE_P (TREE_TYPE (size)))
{
if (name)
error_at (loc, "size of array %qE has incomplete type",
name);
else
error_at (loc, "size of unnamed array has incomplete "
"type");
size = integer_one_node;
}
size = c_fully_fold (size, false, &size_maybe_const);
if (pedantic && size_maybe_const && integer_zerop (size))
{
if (name)
pedwarn (loc, OPT_Wpedantic,
"ISO C forbids zero-size array %qE", name);
else
pedwarn (loc, OPT_Wpedantic,
"ISO C forbids zero-size array");
}
if (TREE_CODE (size) == INTEGER_CST && size_maybe_const)
{
constant_expression_warning (size);
if (tree_int_cst_sgn (size) < 0)
{
if (name)
error_at (loc, "size of array %qE is negative", name);
else
error_at (loc, "size of unnamed array is negative");
size = integer_one_node;
}
/* Handle a size folded to an integer constant but
not an integer constant expression. */
if (!size_int_const)
{
/* If this is a file scope declaration of an
ordinary identifier, this is invalid code;
diagnosing it here and not subsequently
treating the type as variable-length avoids
more confusing diagnostics later. */
if ((decl_context == NORMAL || decl_context == FIELD)
&& current_scope == file_scope)
pedwarn (input_location, 0,
"variably modified %qE at file scope",
name);
else
this_size_varies = size_varies = true;
warn_variable_length_array (name, size);
}
}
else if ((decl_context == NORMAL || decl_context == FIELD)
&& current_scope == file_scope)
{
error_at (loc, "variably modified %qE at file scope", name);
size = integer_one_node;
}
else
{
/* Make sure the array size remains visibly
nonconstant even if it is (eg) a const variable
with known value. */
this_size_varies = size_varies = true;
warn_variable_length_array (name, size);
if (sanitize_flags_p (SANITIZE_VLA)
&& current_function_decl != NULL_TREE
&& decl_context == NORMAL)
{
/* Evaluate the array size only once. */
size = save_expr (size);
size = c_fully_fold (size, false, NULL);
size = fold_build2 (COMPOUND_EXPR, TREE_TYPE (size),
ubsan_instrument_vla (loc, size),
size);
}
}
if (integer_zerop (size) && !this_size_varies)
{
/* A zero-length array cannot be represented with
an unsigned index type, which is what we'll
get with build_index_type. Create an
open-ended range instead. */
itype = build_range_type (sizetype, size, NULL_TREE);
}
else
{
/* Arrange for the SAVE_EXPR on the inside of the
MINUS_EXPR, which allows the -1 to get folded
with the +1 that happens when building TYPE_SIZE. */
if (size_varies)
size = save_expr (size);
if (this_size_varies && TREE_CODE (size) == INTEGER_CST)
size = build2 (COMPOUND_EXPR, TREE_TYPE (size),
integer_zero_node, size);
/* Compute the maximum valid index, that is, size
- 1. Do the calculation in index_type, so that
if it is a variable the computations will be
done in the proper mode. */
itype = fold_build2_loc (loc, MINUS_EXPR, index_type,
convert (index_type, size),
convert (index_type,
size_one_node));
/* The above overflows when size does not fit
in index_type.
??? While a size of INT_MAX+1 technically shouldn't
cause an overflow (because we subtract 1), handling
this case seems like an unnecessary complication. */
if (TREE_CODE (size) == INTEGER_CST
&& !int_fits_type_p (size, index_type))
{
if (name)
error_at (loc, "size of array %qE is too large",
name);
else
error_at (loc, "size of unnamed array is too large");
type = error_mark_node;
continue;
}
itype = build_index_type (itype);
}
if (this_size_varies)
{
if (*expr)
*expr = build2 (COMPOUND_EXPR, TREE_TYPE (size),
*expr, size);
else
*expr = size;
*expr_const_operands &= size_maybe_const;
}
}
else if (decl_context == FIELD)
{
bool flexible_array_member = false;
if (array_parm_vla_unspec_p)
/* Field names can in fact have function prototype
scope so [*] is disallowed here through making
the field variably modified, not through being
something other than a declaration with function
prototype scope. */
size_varies = true;
else
{
const struct c_declarator *t = declarator;
while (t->kind == cdk_attrs)
t = t->declarator;
flexible_array_member = (t->kind == cdk_id);
}
if (flexible_array_member
&& !in_system_header_at (input_location))
pedwarn_c90 (loc, OPT_Wpedantic, "ISO C90 does not "
"support flexible array members");
/* ISO C99 Flexible array members are effectively
identical to GCC's zero-length array extension. */
if (flexible_array_member || array_parm_vla_unspec_p)
itype = build_range_type (sizetype, size_zero_node,
NULL_TREE);
}
else if (decl_context == PARM)
{
if (array_parm_vla_unspec_p)
{
itype = build_range_type (sizetype, size_zero_node, NULL_TREE);
size_varies = true;
}
}
else if (decl_context == TYPENAME)
{
if (array_parm_vla_unspec_p)
{
/* C99 6.7.5.2p4 */
warning (0, "%<[*]%> not in a declaration");
/* We use this to avoid messing up with incomplete
array types of the same type, that would
otherwise be modified below. */
itype = build_range_type (sizetype, size_zero_node,
NULL_TREE);
size_varies = true;
}
}
/* Complain about arrays of incomplete types. */
if (!COMPLETE_TYPE_P (type))
{
error_at (loc, "array type has incomplete element type %qT",
type);
/* See if we can be more helpful. */
if (TREE_CODE (type) == ARRAY_TYPE)
{
if (name)
inform (loc, "declaration of %qE as multidimensional "
"array must have bounds for all dimensions "
"except the first", name);
else
inform (loc, "declaration of multidimensional array "
"must have bounds for all dimensions except "
"the first");
}
type = error_mark_node;
}
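/* Illustration (hypothetical example user code, not part of GCC): only
   the first dimension of a multidimensional array may be omitted:

     int ok[][3] = { { 1, 2, 3 } };   // bound inferred from initializer
     int bad[3][];                    // error: incomplete element type
*/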
else
/* When itype is NULL, a shared incomplete array type is
returned for all array of a given type. Elsewhere we
make sure we don't complete that type before copying
it, but here we want to make sure we don't ever
modify the shared type, so we gcc_assert (itype)
below. */
{
addr_space_t as = DECODE_QUAL_ADDR_SPACE (type_quals);
if (!ADDR_SPACE_GENERIC_P (as) && as != TYPE_ADDR_SPACE (type))
type = build_qualified_type (type,
ENCODE_QUAL_ADDR_SPACE (as));
type = build_array_type (type, itype);
}
if (type != error_mark_node)
{
if (size_varies)
{
/* It is ok to modify type here even if itype is
NULL: if size_varies, we're in a
multi-dimensional array and the inner type has
variable size, so the enclosing shared array type
must too. */
if (size && TREE_CODE (size) == INTEGER_CST)
type
= build_distinct_type_copy (TYPE_MAIN_VARIANT (type));
C_TYPE_VARIABLE_SIZE (type) = 1;
}
/* The GCC extension for zero-length arrays differs from
ISO flexible array members in that sizeof yields
zero. */
if (size && integer_zerop (size))
{
gcc_assert (itype);
type = build_distinct_type_copy (TYPE_MAIN_VARIANT (type));
TYPE_SIZE (type) = bitsize_zero_node;
TYPE_SIZE_UNIT (type) = size_zero_node;
SET_TYPE_STRUCTURAL_EQUALITY (type);
}
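/* Illustration (hypothetical example user code, not part of GCC): the
   GCC zero-length-array extension handled just above versus the ISO
   C99 flexible array member:

     struct gnu { int len; char data[0]; };   // sizeof (x.data) == 0
     struct iso { int len; char data[]; };    // incomplete, no sizeof

   Both serve as trailing variable-size buffers; only the first takes
   the TYPE_SIZE = bitsize_zero_node path here. */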
if (array_parm_vla_unspec_p)
{
gcc_assert (itype);
/* The type is complete. C99 6.7.5.2p4 */
type = build_distinct_type_copy (TYPE_MAIN_VARIANT (type));
TYPE_SIZE (type) = bitsize_zero_node;
TYPE_SIZE_UNIT (type) = size_zero_node;
SET_TYPE_STRUCTURAL_EQUALITY (type);
}
if (!valid_array_size_p (loc, type, name))
type = error_mark_node;
}
if (decl_context != PARM
&& (array_ptr_quals != TYPE_UNQUALIFIED
|| array_ptr_attrs != NULL_TREE
|| array_parm_static))
{
error_at (loc, "static or type qualifiers in non-parameter "
"array declarator");
array_ptr_quals = TYPE_UNQUALIFIED;
array_ptr_attrs = NULL_TREE;
array_parm_static = false;
}
orig_qual_indirect++;
break;
}
case cdk_function:
{
/* Say it's a definition only for the declarator closest
to the identifier, apart possibly from some
attributes. */
bool really_funcdef = false;
tree arg_types;
orig_qual_type = NULL_TREE;
if (funcdef_flag)
{
const struct c_declarator *t = declarator->declarator;
while (t->kind == cdk_attrs)
t = t->declarator;
really_funcdef = (t->kind == cdk_id);
}
/* Declaring a function type. Make sure we have a valid
type for the function to return. */
if (type == error_mark_node)
continue;
size_varies = false;
/* Warn about some types functions can't return. */
if (TREE_CODE (type) == FUNCTION_TYPE)
{
if (name)
error_at (loc, "%qE declared as function returning a "
"function", name);
else
error_at (loc, "type name declared as function "
"returning a function");
type = integer_type_node;
}
if (TREE_CODE (type) == ARRAY_TYPE)
{
if (name)
error_at (loc, "%qE declared as function returning an array",
name);
else
error_at (loc, "type name declared as function returning "
"an array");
type = integer_type_node;
}
/* Construct the function type and go to the next
inner layer of declarator. */
arg_info = declarator->u.arg_info;
arg_types = grokparms (arg_info, really_funcdef);
/* Type qualifiers before the return type of the function
qualify the return type, not the function type. */
if (type_quals)
{
const enum c_declspec_word ignored_quals_list[] =
{
cdw_const, cdw_volatile, cdw_restrict, cdw_address_space,
cdw_atomic, cdw_number_of_elements
};
location_t specs_loc
= smallest_type_quals_location (declspecs->locations,
ignored_quals_list);
if (specs_loc == UNKNOWN_LOCATION)
specs_loc = declspecs->locations[cdw_typedef];
if (specs_loc == UNKNOWN_LOCATION)
specs_loc = loc;
/* Type qualifiers on a function return type are
normally permitted by the standard but have no
effect, so give a warning at -Wreturn-type.
Qualifiers on a void return type are banned on
function definitions in ISO C; GCC used to use
them for noreturn functions. The resolution of C11
DR#423 means qualifiers (other than _Atomic) are
actually removed from the return type when
determining the function type. */
int quals_used = type_quals;
if (flag_isoc11)
quals_used &= TYPE_QUAL_ATOMIC;
if (quals_used && VOID_TYPE_P (type) && really_funcdef)
pedwarn (specs_loc, 0,
"function definition has qualified void return type");
else
warning_at (specs_loc, OPT_Wignored_qualifiers,
"type qualifiers ignored on function return type");
/* Ensure an error for restrict on invalid types; the
DR#423 resolution is not entirely clear about
this. */
if (flag_isoc11
&& (type_quals & TYPE_QUAL_RESTRICT)
&& (!POINTER_TYPE_P (type)
|| !C_TYPE_OBJECT_OR_INCOMPLETE_P (TREE_TYPE (type))))
error_at (loc, "invalid use of %<restrict%>");
if (quals_used)
type = c_build_qualified_type (type, quals_used);
}
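/* Illustration (hypothetical example user code, not part of GCC): per
   the C11 DR#423 resolution applied above, non-_Atomic qualifiers are
   dropped when forming the function type:

     const int f (void);            // -Wignored-qualifiers warning;
                                    //   the type is plain int (void)
     volatile void g (void) { }     // pedwarn: definition has
                                    //   qualified void return type
*/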
type_quals = TYPE_UNQUALIFIED;
type = build_function_type (type, arg_types);
declarator = declarator->declarator;
/* Set the TYPE_CONTEXTs for each tagged type which is local to
the formal parameter list of this FUNCTION_TYPE to point to
the FUNCTION_TYPE node itself. */
{
c_arg_tag *tag;
unsigned ix;
FOR_EACH_VEC_SAFE_ELT_REVERSE (arg_info->tags, ix, tag)
TYPE_CONTEXT (tag->type) = type;
}
break;
}
case cdk_pointer:
{
/* Merge any constancy or volatility into the target type
for the pointer. */
if ((type_quals & TYPE_QUAL_ATOMIC)
&& TREE_CODE (type) == FUNCTION_TYPE)
{
error_at (loc,
"%<_Atomic%>-qualified function type");
type_quals &= ~TYPE_QUAL_ATOMIC;
}
else if (pedantic && TREE_CODE (type) == FUNCTION_TYPE
&& type_quals)
pedwarn (loc, OPT_Wpedantic,
"ISO C forbids qualified function types");
if (type_quals)
type = c_build_qualified_type (type, type_quals, orig_qual_type,
orig_qual_indirect);
orig_qual_type = NULL_TREE;
size_varies = false;
/* When the pointed-to type involves components of variable size,
care must be taken to ensure that the size evaluation code is
emitted early enough to dominate all the possible later uses
and late enough for the variables on which it depends to have
been assigned.
This is expected to happen automatically when the pointed-to
type has a name/declaration of its own, but special attention
is required if the type is anonymous.
We attach an artificial TYPE_DECL to such pointed-to type
and arrange for it to be included in a DECL_EXPR. This
forces the sizes evaluation at a safe point and ensures it
is not deferred until e.g. within a deeper conditional context.
PARM contexts have no enclosing statement list that
can hold the DECL_EXPR, so we need to use a BIND_EXPR
instead, and add it to the list of expressions that
need to be evaluated.
TYPENAME contexts do have an enclosing statement list,
but it would be incorrect to use it, as the size should
only be evaluated if the containing expression is
evaluated. We might also be in the middle of an
expression with side effects on the pointed-to type size
"arguments" prior to the pointer declaration point and
the fake TYPE_DECL in the enclosing context would force
the size evaluation prior to the side effects. We therefore
use BIND_EXPRs in TYPENAME contexts too. */
if (!TYPE_NAME (type)
&& variably_modified_type_p (type, NULL_TREE))
{
tree bind = NULL_TREE;
if (decl_context == TYPENAME || decl_context == PARM)
{
bind = build3 (BIND_EXPR, void_type_node, NULL_TREE,
NULL_TREE, NULL_TREE);
TREE_SIDE_EFFECTS (bind) = 1;
BIND_EXPR_BODY (bind) = push_stmt_list ();
push_scope ();
}
tree decl = build_decl (loc, TYPE_DECL, NULL_TREE, type);
DECL_ARTIFICIAL (decl) = 1;
pushdecl (decl);
finish_decl (decl, loc, NULL_TREE, NULL_TREE, NULL_TREE);
TYPE_NAME (type) = decl;
if (bind)
{
pop_scope ();
BIND_EXPR_BODY (bind)
= pop_stmt_list (BIND_EXPR_BODY (bind));
if (*expr)
*expr = build2 (COMPOUND_EXPR, void_type_node, *expr,
bind);
else
*expr = bind;
}
}
type = c_build_pointer_type (type);
/* Process type qualifiers (such as const or volatile)
that were given inside the `*'. */
type_quals = declarator->u.pointer_quals;
declarator = declarator->declarator;
break;
}
default:
gcc_unreachable ();
}
}
*decl_attrs = chainon (returned_attrs, *decl_attrs);
/* Now TYPE has the actual type, apart from any qualifiers in
TYPE_QUALS. */
/* Warn about address space used for things other than static memory or
pointers. */
address_space = DECODE_QUAL_ADDR_SPACE (type_quals);
if (!ADDR_SPACE_GENERIC_P (address_space))
{
if (decl_context == NORMAL)
{
switch (storage_class)
{
case csc_auto:
error ("%qs combined with %<auto%> qualifier for %qE",
c_addr_space_name (address_space), name);
break;
case csc_register:
error ("%qs combined with %<register%> qualifier for %qE",
c_addr_space_name (address_space), name);
break;
case csc_none:
if (current_function_scope)
{
error ("%qs specified for auto variable %qE",
c_addr_space_name (address_space), name);
break;
}
break;
case csc_static:
case csc_extern:
case csc_typedef:
break;
default:
gcc_unreachable ();
}
}
else if (decl_context == PARM && TREE_CODE (type) != ARRAY_TYPE)
{
if (name)
error ("%qs specified for parameter %qE",
c_addr_space_name (address_space), name);
else
error ("%qs specified for unnamed parameter",
c_addr_space_name (address_space));
}
else if (decl_context == FIELD)
{
if (name)
error ("%qs specified for structure field %qE",
c_addr_space_name (address_space), name);
else
error ("%qs specified for structure field",
c_addr_space_name (address_space));
}
}
/* Check the type and width of a bit-field. */
if (bitfield)
{
check_bitfield_type_and_width (loc, &type, width, name);
/* C11 makes it implementation-defined (6.7.2.1#5) whether
atomic types are permitted for bit-fields; we have no code to
make bit-field accesses atomic, so disallow them. */
if (type_quals & TYPE_QUAL_ATOMIC)
{
if (name)
error_at (loc, "bit-field %qE has atomic type", name);
else
error_at (loc, "bit-field has atomic type");
type_quals &= ~TYPE_QUAL_ATOMIC;
}
}
/* Reject invalid uses of _Alignas. */
if (declspecs->alignas_p)
{
if (storage_class == csc_typedef)
error_at (loc, "alignment specified for typedef %qE", name);
else if (storage_class == csc_register)
error_at (loc, "alignment specified for %<register%> object %qE",
name);
else if (decl_context == PARM)
{
if (name)
error_at (loc, "alignment specified for parameter %qE", name);
else
error_at (loc, "alignment specified for unnamed parameter");
}
else if (bitfield)
{
if (name)
error_at (loc, "alignment specified for bit-field %qE", name);
else
error_at (loc, "alignment specified for unnamed bit-field");
}
else if (TREE_CODE (type) == FUNCTION_TYPE)
error_at (loc, "alignment specified for function %qE", name);
else if (declspecs->align_log != -1 && TYPE_P (type))
{
alignas_align = 1U << declspecs->align_log;
if (alignas_align < min_align_of_type (type))
{
if (name)
error_at (loc, "%<_Alignas%> specifiers cannot reduce "
"alignment of %qE", name);
else
error_at (loc, "%<_Alignas%> specifiers cannot reduce "
"alignment of unnamed field");
alignas_align = 0;
}
}
}
/* If this is declaring a typedef name, return a TYPE_DECL. */
if (storage_class == csc_typedef)
{
tree decl;
if ((type_quals & TYPE_QUAL_ATOMIC)
&& TREE_CODE (type) == FUNCTION_TYPE)
{
error_at (loc,
"%<_Atomic%>-qualified function type");
type_quals &= ~TYPE_QUAL_ATOMIC;
}
else if (pedantic && TREE_CODE (type) == FUNCTION_TYPE
&& type_quals)
pedwarn (loc, OPT_Wpedantic,
"ISO C forbids qualified function types");
if (type_quals)
type = c_build_qualified_type (type, type_quals, orig_qual_type,
orig_qual_indirect);
decl = build_decl (declarator->id_loc,
TYPE_DECL, declarator->u.id, type);
if (declspecs->explicit_signed_p)
C_TYPEDEF_EXPLICITLY_SIGNED (decl) = 1;
if (declspecs->inline_p)
pedwarn (loc, 0, "typedef %q+D declared %<inline%>", decl);
if (declspecs->noreturn_p)
pedwarn (loc, 0, "typedef %q+D declared %<_Noreturn%>", decl);
if (warn_cxx_compat && declarator->u.id != NULL_TREE)
{
struct c_binding *b = I_TAG_BINDING (declarator->u.id);
if (b != NULL
&& b->decl != NULL_TREE
&& (B_IN_CURRENT_SCOPE (b)
|| (current_scope == file_scope && B_IN_EXTERNAL_SCOPE (b)))
&& TYPE_MAIN_VARIANT (b->decl) != TYPE_MAIN_VARIANT (type))
{
if (warning_at (declarator->id_loc, OPT_Wc___compat,
("using %qD as both a typedef and a tag is "
"invalid in C++"), decl)
&& b->locus != UNKNOWN_LOCATION)
inform (b->locus, "originally defined here");
}
}
return decl;
}
/* If this is a type name (such as, in a cast or sizeof),
compute the type and return it now. */
if (decl_context == TYPENAME)
{
/* Note that the grammar rejects storage classes in typenames
and fields. */
gcc_assert (storage_class == csc_none && !threadp
&& !declspecs->inline_p && !declspecs->noreturn_p);
if ((type_quals & TYPE_QUAL_ATOMIC)
&& TREE_CODE (type) == FUNCTION_TYPE)
{
error_at (loc,
"%<_Atomic%>-qualified function type");
type_quals &= ~TYPE_QUAL_ATOMIC;
}
else if (pedantic && TREE_CODE (type) == FUNCTION_TYPE
&& type_quals)
pedwarn (loc, OPT_Wpedantic,
"ISO C forbids const or volatile function types");
if (type_quals)
type = c_build_qualified_type (type, type_quals, orig_qual_type,
orig_qual_indirect);
return type;
}
if (pedantic && decl_context == FIELD
&& variably_modified_type_p (type, NULL_TREE))
{
/* C99 6.7.2.1p8 */
pedwarn (loc, OPT_Wpedantic, "a member of a structure or union cannot "
"have a variably modified type");
}
/* Aside from typedefs and type names (handled above),
`void' at top level (not within pointer)
is allowed only in public variables.
We don't complain about parms either, but that is because
a better error message can be made later. */
if (VOID_TYPE_P (type) && decl_context != PARM
&& !((decl_context != FIELD && TREE_CODE (type) != FUNCTION_TYPE)
&& (storage_class == csc_extern
|| (current_scope == file_scope
&& !(storage_class == csc_static
|| storage_class == csc_register)))))
{
error_at (loc, "variable or field %qE declared void", name);
type = integer_type_node;
}
/* Now create the decl, which may be a VAR_DECL, a PARM_DECL
or a FUNCTION_DECL, depending on DECL_CONTEXT and TYPE. */
{
tree decl;
if (decl_context == PARM)
{
tree promoted_type;
bool array_parameter_p = false;
/* A parameter declared as an array of T is really a pointer to T.
One declared as a function is really a pointer to a function. */
if (TREE_CODE (type) == ARRAY_TYPE)
{
/* Transfer const-ness of array into that of type pointed to. */
type = TREE_TYPE (type);
if (orig_qual_type != NULL_TREE)
{
if (orig_qual_indirect == 0)
orig_qual_type = TREE_TYPE (orig_qual_type);
else
orig_qual_indirect--;
}
if (type_quals)
type = c_build_qualified_type (type, type_quals, orig_qual_type,
orig_qual_indirect);
type = c_build_pointer_type (type);
type_quals = array_ptr_quals;
if (type_quals)
type = c_build_qualified_type (type, type_quals);
/* We don't yet implement attributes in this context. */
if (array_ptr_attrs != NULL_TREE)
warning_at (loc, OPT_Wattributes,
"attributes in parameter array declarator ignored");
size_varies = false;
array_parameter_p = true;
}
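/* Illustration (hypothetical example user code, not part of GCC): how
   the adjustments above rewrite an array parameter (C99 6.7.5.3p7):

     void f (const int a[10]);          // becomes: const int *a
     void g (int b[const static 10]);   // becomes: int *const b, with
                                        //   [static] recorded from
                                        //   array_parm_static
*/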
else if (TREE_CODE (type) == FUNCTION_TYPE)
{
if (type_quals & TYPE_QUAL_ATOMIC)
{
error_at (loc,
"%<_Atomic%>-qualified function type");
type_quals &= ~TYPE_QUAL_ATOMIC;
}
else if (type_quals)
pedwarn (loc, OPT_Wpedantic,
"ISO C forbids qualified function types");
if (type_quals)
type = c_build_qualified_type (type, type_quals);
type = c_build_pointer_type (type);
type_quals = TYPE_UNQUALIFIED;
}
else if (type_quals)
type = c_build_qualified_type (type, type_quals);
decl = build_decl (declarator->id_loc,
PARM_DECL, declarator->u.id, type);
if (size_varies)
C_DECL_VARIABLE_SIZE (decl) = 1;
C_ARRAY_PARAMETER (decl) = array_parameter_p;
/* Compute the type actually passed in the parmlist,
for the case where there is no prototype.
(For example, shorts and chars are passed as ints.)
When there is a prototype, this is overridden later. */
if (type == error_mark_node)
promoted_type = type;
else
promoted_type = c_type_promotes_to (type);
DECL_ARG_TYPE (decl) = promoted_type;
if (declspecs->inline_p)
pedwarn (loc, 0, "parameter %q+D declared %<inline%>", decl);
if (declspecs->noreturn_p)
pedwarn (loc, 0, "parameter %q+D declared %<_Noreturn%>", decl);
}
else if (decl_context == FIELD)
{
/* Note that the grammar rejects storage classes in typenames
and fields. */
gcc_assert (storage_class == csc_none && !threadp
&& !declspecs->inline_p && !declspecs->noreturn_p);
/* Structure field. It may not be a function. */
if (TREE_CODE (type) == FUNCTION_TYPE)
{
error_at (loc, "field %qE declared as a function", name);
type = build_pointer_type (type);
}
else if (TREE_CODE (type) != ERROR_MARK
&& !COMPLETE_OR_UNBOUND_ARRAY_TYPE_P (type))
{
if (name)
error_at (loc, "field %qE has incomplete type", name);
else
error_at (loc, "unnamed field has incomplete type");
type = error_mark_node;
}
else if (TREE_CODE (type) == ARRAY_TYPE
&& TYPE_DOMAIN (type) == NULL_TREE)
{
/* We have a flexible array member through a typedef.
Set suitable range. Whether this is a correct position
for a flexible array member will be determined elsewhere. */
if (!in_system_header_at (input_location))
pedwarn_c90 (loc, OPT_Wpedantic, "ISO C90 does not "
"support flexible array members");
type = build_distinct_type_copy (TYPE_MAIN_VARIANT (type));
TYPE_DOMAIN (type) = build_range_type (sizetype, size_zero_node,
NULL_TREE);
if (orig_qual_indirect == 0)
orig_qual_type = NULL_TREE;
}
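/* Illustration (hypothetical example user code, not part of GCC): a
   flexible array member reached through a typedef, which is why the
   range must be filled in here rather than in the cdk_array case:

     typedef int flex[];
     struct s { int n; flex a; };
*/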
type = c_build_qualified_type (type, type_quals, orig_qual_type,
orig_qual_indirect);
decl = build_decl (declarator->id_loc,
FIELD_DECL, declarator->u.id, type);
DECL_NONADDRESSABLE_P (decl) = bitfield;
if (bitfield && !declarator->u.id)
{
TREE_NO_WARNING (decl) = 1;
DECL_PADDING_P (decl) = 1;
}
if (size_varies)
C_DECL_VARIABLE_SIZE (decl) = 1;
}
else if (TREE_CODE (type) == FUNCTION_TYPE)
{
if (storage_class == csc_register || threadp)
{
error_at (loc, "invalid storage class for function %qE", name);
}
else if (current_scope != file_scope)
{
/* Function declaration not at file scope. Storage
classes other than `extern' are not allowed, C99
6.7.1p5, and `extern' makes no difference. However,
GCC allows 'auto', perhaps with 'inline', to support
nested functions. */
if (storage_class == csc_auto)
pedwarn (loc, OPT_Wpedantic,
"invalid storage class for function %qE", name);
else if (storage_class == csc_static)
{
error_at (loc, "invalid storage class for function %qE", name);
if (funcdef_flag)
storage_class = declspecs->storage_class = csc_none;
else
return NULL_TREE;
}
}
decl = build_decl (declarator->id_loc,
FUNCTION_DECL, declarator->u.id, type);
decl = build_decl_attribute_variant (decl, decl_attr);
if (type_quals & TYPE_QUAL_ATOMIC)
{
error_at (loc,
"%<_Atomic%>-qualified function type");
type_quals &= ~TYPE_QUAL_ATOMIC;
}
else if (pedantic && type_quals && !DECL_IN_SYSTEM_HEADER (decl))
pedwarn (loc, OPT_Wpedantic,
"ISO C forbids qualified function types");
/* Every function declaration is an external reference
(DECL_EXTERNAL) except for those which are not at file
scope and are explicitly declared "auto". This is
forbidden by standard C (C99 6.7.1p5) and is interpreted by
GCC to signify a forward declaration of a nested function. */
if (storage_class == csc_auto && current_scope != file_scope)
DECL_EXTERNAL (decl) = 0;
/* In C99, a function which is declared 'inline' with 'extern'
is not an external reference (which is confusing). It
means that the later definition of the function must be output
in this file, C99 6.7.4p6. In GNU C89, a function declared
'extern inline' is an external reference. */
else if (declspecs->inline_p && storage_class != csc_static)
DECL_EXTERNAL (decl) = ((storage_class == csc_extern)
== flag_gnu89_inline);
else
DECL_EXTERNAL (decl) = !initialized;
/* Record absence of global scope for `static' or `auto'. */
TREE_PUBLIC (decl)
= !(storage_class == csc_static || storage_class == csc_auto);
/* For a function definition, record the argument information
block where store_parm_decls will look for it. */
if (funcdef_flag)
current_function_arg_info = arg_info;
if (declspecs->default_int_p)
C_FUNCTION_IMPLICIT_INT (decl) = 1;
/* Record presence of `inline' and `_Noreturn', if it is
reasonable. */
if (flag_hosted && MAIN_NAME_P (declarator->u.id))
{
if (declspecs->inline_p)
pedwarn (loc, 0, "cannot inline function %<main%>");
if (declspecs->noreturn_p)
pedwarn (loc, 0, "%<main%> declared %<_Noreturn%>");
}
else
{
if (declspecs->inline_p)
/* Record that the function is declared `inline'. */
DECL_DECLARED_INLINE_P (decl) = 1;
if (declspecs->noreturn_p)
{
if (flag_isoc99)
pedwarn_c99 (loc, OPT_Wpedantic,
"ISO C99 does not support %<_Noreturn%>");
else
pedwarn_c99 (loc, OPT_Wpedantic,
"ISO C90 does not support %<_Noreturn%>");
TREE_THIS_VOLATILE (decl) = 1;
}
}
}
else
{
/* It's a variable. */
/* An uninitialized decl with `extern' is a reference. */
int extern_ref = !initialized && storage_class == csc_extern;
type = c_build_qualified_type (type, type_quals, orig_qual_type,
orig_qual_indirect);
/* C99 6.2.2p7: It is invalid (compile-time undefined
behavior) to create an 'extern' declaration for a
variable if there is a global declaration that is
'static' and the global declaration is not visible.
(If the static declaration _is_ currently visible,
the 'extern' declaration is taken to refer to that decl.) */
if (extern_ref && current_scope != file_scope)
{
tree global_decl = identifier_global_value (declarator->u.id);
tree visible_decl = lookup_name (declarator->u.id);
if (global_decl
&& global_decl != visible_decl
&& VAR_P (global_decl)
&& !TREE_PUBLIC (global_decl))
error_at (loc, "variable previously declared %<static%> "
"redeclared %<extern%>");
}
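/* Illustration (hypothetical example user code, not part of GCC): the
   C99 6.2.2p7 case diagnosed above, where the static declaration is
   hidden at the point of the extern declaration:

     static int v;
     void f (void)
     {
       int v;              // hides the file-scope static
       { extern int v; }   // error: static redeclared extern
     }
*/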
decl = build_decl (declarator->id_loc,
VAR_DECL, declarator->u.id, type);
if (size_varies)
C_DECL_VARIABLE_SIZE (decl) = 1;
if (declspecs->inline_p)
pedwarn (loc, 0, "variable %q+D declared %<inline%>", decl);
if (declspecs->noreturn_p)
pedwarn (loc, 0, "variable %q+D declared %<_Noreturn%>", decl);
/* At file scope, an initialized extern declaration may follow
a static declaration. In that case, DECL_EXTERNAL will be
reset later in start_decl. */
DECL_EXTERNAL (decl) = (storage_class == csc_extern);
/* At file scope, the presence of a `static' or `register' storage
class specifier, or the absence of all storage class specifiers
makes this declaration a definition (perhaps tentative). Also,
the absence of `static' makes it public. */
if (current_scope == file_scope)
{
TREE_PUBLIC (decl) = storage_class != csc_static;
TREE_STATIC (decl) = !extern_ref;
}
/* Not at file scope, only `static' makes a static definition. */
else
{
TREE_STATIC (decl) = (storage_class == csc_static);
TREE_PUBLIC (decl) = extern_ref;
}
if (threadp)
set_decl_tls_model (decl, decl_default_tls_model (decl));
}
if ((storage_class == csc_extern
|| (storage_class == csc_none
&& TREE_CODE (type) == FUNCTION_TYPE
&& !funcdef_flag))
&& variably_modified_type_p (type, NULL_TREE))
{
/* C99 6.7.5.2p2 */
if (TREE_CODE (type) == FUNCTION_TYPE)
error_at (loc, "non-nested function with variably modified type");
else
error_at (loc, "object with variably modified type must have "
"no linkage");
}
/* Record `register' declaration for warnings on &
and in case doing stupid register allocation. */
if (storage_class == csc_register)
{
C_DECL_REGISTER (decl) = 1;
DECL_REGISTER (decl) = 1;
}
/* Record constancy and volatility. */
c_apply_type_quals_to_decl (type_quals, decl);
/* Apply _Alignas specifiers. */
if (alignas_align)
{
SET_DECL_ALIGN (decl, alignas_align * BITS_PER_UNIT);
DECL_USER_ALIGN (decl) = 1;
}
/* If a type has volatile components, it should be stored in memory.
Otherwise, the fact that those components are volatile
will be ignored, and would even crash the compiler.
Of course, this only makes sense on VAR, PARM, and RESULT decls. */
if (C_TYPE_FIELDS_VOLATILE (TREE_TYPE (decl))
&& (VAR_P (decl) || TREE_CODE (decl) == PARM_DECL
|| TREE_CODE (decl) == RESULT_DECL))
{
/* It is not an error for a structure with volatile fields to
be declared register, but reset DECL_REGISTER since it
cannot actually go in a register. */
int was_reg = C_DECL_REGISTER (decl);
C_DECL_REGISTER (decl) = 0;
DECL_REGISTER (decl) = 0;
c_mark_addressable (decl);
C_DECL_REGISTER (decl) = was_reg;
}
/* This is the earliest point at which we might know the assembler
name of a variable. Thus, if it's known before this, die horribly. */
gcc_assert (!HAS_DECL_ASSEMBLER_NAME_P (decl)
|| !DECL_ASSEMBLER_NAME_SET_P (decl));
if (warn_cxx_compat
&& VAR_P (decl)
&& TREE_PUBLIC (decl)
&& TREE_STATIC (decl)
&& (RECORD_OR_UNION_TYPE_P (TREE_TYPE (decl))
|| TREE_CODE (TREE_TYPE (decl)) == ENUMERAL_TYPE)
&& TYPE_NAME (TREE_TYPE (decl)) == NULL_TREE)
warning_at (DECL_SOURCE_LOCATION (decl), OPT_Wc___compat,
("non-local variable %qD with anonymous type is "
"questionable in C++"),
decl);
return decl;
}
}
/* Decode the parameter-list info for a function type or function definition.
The argument is the value returned by `get_parm_info' (or made in c-parse.c
if there is an identifier list instead of a parameter decl list).
These two functions are separate because when a function returns
or receives functions then each is called multiple times but the order
of calls is different. The last call to `grokparms' is always the one
that contains the formal parameter names of a function definition.
Return a list of arg types to use in the FUNCTION_TYPE for this function.
FUNCDEF_FLAG is true for a function definition, false for
a mere declaration. A nonempty identifier-list gets an error message
when FUNCDEF_FLAG is false. */
static tree
grokparms (struct c_arg_info *arg_info, bool funcdef_flag)
{
tree arg_types = arg_info->types;
if (funcdef_flag && arg_info->had_vla_unspec)
{
/* A function definition is not function prototype scope (C99 6.2.1p4). */
/* C99 6.7.5.2p4 */
error ("%<[*]%> not allowed in other than function prototype scope");
}
if (arg_types == NULL_TREE && !funcdef_flag
&& !in_system_header_at (input_location))
warning (OPT_Wstrict_prototypes,
"function declaration isn%'t a prototype");
if (arg_types == error_mark_node)
/* Don't set TYPE_ARG_TYPES in this case. */
return NULL_TREE;
else if (arg_types && TREE_CODE (TREE_VALUE (arg_types)) == IDENTIFIER_NODE)
{
if (!funcdef_flag)
{
pedwarn (input_location, 0, "parameter names (without types) in "
"function declaration");
arg_info->parms = NULL_TREE;
}
else
arg_info->parms = arg_info->types;
arg_info->types = NULL_TREE;
return NULL_TREE;
}
else
{
tree parm, type, typelt;
unsigned int parmno;
/* If there is a parameter of incomplete type in a definition,
this is an error. In a declaration this is valid, and a
struct or union type may be completed later, before any calls
or definition of the function. In the case where the tag was
first declared within the parameter list, a warning has
already been given. If a parameter has void type, then
however the function cannot be defined or called, so
warn. */
for (parm = arg_info->parms, typelt = arg_types, parmno = 1;
parm;
parm = DECL_CHAIN (parm), typelt = TREE_CHAIN (typelt), parmno++)
{
type = TREE_VALUE (typelt);
if (type == error_mark_node)
continue;
if (!COMPLETE_TYPE_P (type))
{
if (funcdef_flag)
{
if (DECL_NAME (parm))
error_at (input_location,
"parameter %u (%q+D) has incomplete type",
parmno, parm);
else
error_at (DECL_SOURCE_LOCATION (parm),
"parameter %u has incomplete type",
parmno);
TREE_VALUE (typelt) = error_mark_node;
TREE_TYPE (parm) = error_mark_node;
arg_types = NULL_TREE;
}
else if (VOID_TYPE_P (type))
{
if (DECL_NAME (parm))
warning_at (input_location, 0,
"parameter %u (%q+D) has void type",
parmno, parm);
else
warning_at (DECL_SOURCE_LOCATION (parm), 0,
"parameter %u has void type",
parmno);
}
}
if (DECL_NAME (parm) && TREE_USED (parm))
warn_if_shadowing (parm);
}
return arg_types;
}
}
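/* Illustration (hypothetical example user code, not part of GCC): the
   three shapes grokparms distinguishes:

     int f ();                                   // unprototyped:
                                                 //   -Wstrict-prototypes
     int g (a, b) int a, b; { return a + b; }    // identifier list,
                                                 //   valid only on a
                                                 //   definition
     int h (int a, int b);                       // prototype: parameter
                                                 //   types checked above
*/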
/* Allocate and initialize a c_arg_info structure from the parser's
obstack. */
struct c_arg_info *
build_arg_info (void)
{
struct c_arg_info *ret = XOBNEW (&parser_obstack, struct c_arg_info);
ret->parms = NULL_TREE;
ret->tags = NULL;
ret->types = NULL_TREE;
ret->others = NULL_TREE;
ret->pending_sizes = NULL;
ret->had_vla_unspec = 0;
return ret;
}
/* Take apart the current scope and return a c_arg_info structure with
info on a parameter list just parsed.
This structure is later fed to 'grokparms' and 'store_parm_decls'.
ELLIPSIS being true means the argument list ended in '...' so don't
append a sentinel (void_list_node) to the end of the type-list.
EXPR is NULL or an expression that needs to be evaluated for the
side effects of array size expressions in the parameters. */
struct c_arg_info *
get_parm_info (bool ellipsis, tree expr)
{
struct c_binding *b = current_scope->bindings;
struct c_arg_info *arg_info = build_arg_info ();
tree parms = NULL_TREE;
vec<c_arg_tag, va_gc> *tags = NULL;
tree types = NULL_TREE;
tree others = NULL_TREE;
bool gave_void_only_once_err = false;
arg_info->had_vla_unspec = current_scope->had_vla_unspec;
/* The bindings in this scope must not get put into a block.
We will take care of deleting the binding nodes. */
current_scope->bindings = 0;
/* This function is only called if there was *something* on the
parameter list. */
gcc_assert (b);
/* A parameter list consisting solely of 'void' indicates that the
function takes no arguments. But if the 'void' is qualified
(by 'const' or 'volatile'), or has a storage class specifier
('register'), then the behavior is undefined; issue an error.
Typedefs for 'void' are OK (see DR#157). */
if (b->prev == 0 /* one binding */
&& TREE_CODE (b->decl) == PARM_DECL /* which is a parameter */
&& !DECL_NAME (b->decl) /* anonymous */
&& VOID_TYPE_P (TREE_TYPE (b->decl))) /* of void type */
{
if (TYPE_QUALS (TREE_TYPE (b->decl)) != TYPE_UNQUALIFIED
|| C_DECL_REGISTER (b->decl))
error_at (b->locus, "%<void%> as only parameter may not be qualified");
/* There cannot be an ellipsis. */
if (ellipsis)
error_at (b->locus, "%<void%> must be the only parameter");
arg_info->types = void_list_node;
return arg_info;
}
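/* Illustration (hypothetical example user code, not part of GCC):

     int f (void);              // OK: takes no arguments
     int g (const void);        // error: qualified void parameter
     int h (void, ...);         // error: void must be the only parameter
     typedef void V;
     int k (V);                 // OK per DR#157
*/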
if (!ellipsis)
types = void_list_node;
/* Break up the bindings list into parms, tags, types, and others;
apply sanity checks; purge the name-to-decl bindings. */
while (b)
{
tree decl = b->decl;
tree type = TREE_TYPE (decl);
c_arg_tag tag;
const char *keyword;
switch (TREE_CODE (decl))
{
case PARM_DECL:
if (b->id)
{
gcc_assert (I_SYMBOL_BINDING (b->id) == b);
I_SYMBOL_BINDING (b->id) = b->shadowed;
}
/* Check for forward decls that never got their actual decl. */
if (TREE_ASM_WRITTEN (decl))
error_at (b->locus,
"parameter %q+D has just a forward declaration", decl);
/* Check for (..., void, ...) and issue an error. */
else if (VOID_TYPE_P (type) && !DECL_NAME (decl))
{
if (!gave_void_only_once_err)
{
error_at (b->locus, "%<void%> must be the only parameter");
gave_void_only_once_err = true;
}
}
else
{
/* Valid parameter, add it to the list. */
DECL_CHAIN (decl) = parms;
parms = decl;
/* Since there is a prototype, args are passed in their
declared types. The back end may override this later. */
DECL_ARG_TYPE (decl) = type;
types = tree_cons (0, type, types);
}
break;
case ENUMERAL_TYPE: keyword = "enum"; goto tag;
case UNION_TYPE: keyword = "union"; goto tag;
case RECORD_TYPE: keyword = "struct"; goto tag;
tag:
/* Types may not have tag-names, in which case the type
appears in the bindings list with b->id NULL. */
if (b->id)
{
gcc_assert (I_TAG_BINDING (b->id) == b);
I_TAG_BINDING (b->id) = b->shadowed;
}
/* Warn about any struct, union or enum tags defined in a
parameter list. The scope of such types is limited to
the parameter list, which is rarely if ever desirable
(it's impossible to call such a function with type-
correct arguments). An anonymous union parm type is
meaningful as a GNU extension, so don't warn for that. */
if (TREE_CODE (decl) != UNION_TYPE || b->id != NULL_TREE)
{
if (b->id)
/* The %s will be one of 'struct', 'union', or 'enum'. */
warning_at (b->locus, 0,
"%<%s %E%> declared inside parameter list"
" will not be visible outside of this definition or"
" declaration", keyword, b->id);
else
/* The %s will be one of 'struct', 'union', or 'enum'. */
warning_at (b->locus, 0,
"anonymous %s declared inside parameter list"
" will not be visible outside of this definition or"
" declaration", keyword);
}
tag.id = b->id;
tag.type = decl;
vec_safe_push (tags, tag);
break;
case FUNCTION_DECL:
/* FUNCTION_DECLs appear when there is an implicit function
declaration in the parameter list. */
gcc_assert (b->nested || seen_error ());
goto set_shadowed;
case CONST_DECL:
case TYPE_DECL:
/* CONST_DECLs appear here when we have an embedded enum,
and TYPE_DECLs appear here when we have an embedded struct
or union. No warnings for this - we already warned about the
type itself. */
/* When we reinsert this decl in the function body, we need
to reconstruct whether it was marked as nested. */
gcc_assert (!b->nested);
DECL_CHAIN (decl) = others;
others = decl;
/* fall through */
case ERROR_MARK:
set_shadowed:
/* error_mark_node appears here when we have an undeclared
variable. Just throw it away. */
if (b->id)
{
gcc_assert (I_SYMBOL_BINDING (b->id) == b);
I_SYMBOL_BINDING (b->id) = b->shadowed;
}
break;
/* Other things that might be encountered. */
case LABEL_DECL:
case VAR_DECL:
default:
gcc_unreachable ();
}
b = free_binding_and_advance (b);
}
arg_info->parms = parms;
arg_info->tags = tags;
arg_info->types = types;
arg_info->others = others;
arg_info->pending_sizes = expr;
return arg_info;
}
/* Get the struct, enum or union (CODE says which) with tag NAME.
Define the tag as a forward-reference with location LOC if it is
not defined. Return a c_typespec structure for the type
specifier. */
struct c_typespec
parser_xref_tag (location_t loc, enum tree_code code, tree name)
{
struct c_typespec ret;
tree ref;
location_t refloc;
ret.expr = NULL_TREE;
ret.expr_const_operands = true;
/* If a cross reference is requested, look up the type
already defined for this tag and return it. */
ref = lookup_tag (code, name, false, &refloc);
/* If this is the right type of tag, return what we found.
(This reference will be shadowed by shadow_tag later if appropriate.)
If this is the wrong type of tag, do not return it. If it was the
wrong type in the same scope, we will have had an error
message already; if in a different scope and declaring
a name, pending_xref_error will give an error message; but if in a
different scope and not declaring a name, this tag should
shadow the previous declaration of a different type of tag, and
this would not work properly if we return the reference found.
(For example, with "struct foo" in an outer scope, "union foo;"
must shadow that tag with a new one of union type.) */
ret.kind = (ref ? ctsk_tagref : ctsk_tagfirstref);
if (ref && TREE_CODE (ref) == code)
{
if (C_TYPE_DEFINED_IN_STRUCT (ref)
&& loc != UNKNOWN_LOCATION
&& warn_cxx_compat)
{
switch (code)
{
case ENUMERAL_TYPE:
warning_at (loc, OPT_Wc___compat,
("enum type defined in struct or union "
"is not visible in C++"));
inform (refloc, "enum type defined here");
break;
case RECORD_TYPE:
warning_at (loc, OPT_Wc___compat,
("struct defined in struct or union "
"is not visible in C++"));
inform (refloc, "struct defined here");
break;
case UNION_TYPE:
warning_at (loc, OPT_Wc___compat,
("union defined in struct or union "
"is not visible in C++"));
inform (refloc, "union defined here");
break;
default:
gcc_unreachable ();
}
}
ret.spec = ref;
return ret;
}
/* If no such tag is yet defined, create a forward-reference node
and record it as the "definition".
When a real declaration of this type is found,
the forward-reference will be altered into a real type. */
ref = make_node (code);
if (code == ENUMERAL_TYPE)
{
/* Give the type a default layout like unsigned int
to avoid crashing if it does not get defined. */
SET_TYPE_MODE (ref, TYPE_MODE (unsigned_type_node));
SET_TYPE_ALIGN (ref, TYPE_ALIGN (unsigned_type_node));
TYPE_USER_ALIGN (ref) = 0;
TYPE_UNSIGNED (ref) = 1;
TYPE_PRECISION (ref) = TYPE_PRECISION (unsigned_type_node);
TYPE_MIN_VALUE (ref) = TYPE_MIN_VALUE (unsigned_type_node);
TYPE_MAX_VALUE (ref) = TYPE_MAX_VALUE (unsigned_type_node);
}
pushtag (loc, name, ref);
ret.spec = ref;
return ret;
}
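/* A concrete sketch of the shadowing rule described in the comments
   above:

	struct foo { int i; };
	void f (void)
	{
	  union foo;	-> introduces a new tag of union type that
	  union foo *u;	   shadows the outer "struct foo"
	}

   Because the tag found by lookup_tag has the wrong TREE_CODE, the
   reference found is not returned; a fresh forward-reference node is
   made and pushed instead.  */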
/* Get the struct, enum or union (CODE says which) with tag NAME.
Define the tag as a forward-reference if it is not defined.
Return a tree for the type. */
tree
xref_tag (enum tree_code code, tree name)
{
return parser_xref_tag (input_location, code, name).spec;
}
/* Make sure that the tag NAME is defined *in the current scope*
at least as a forward reference.
LOC is the location of the struct's definition.
CODE says which kind of tag NAME ought to be.
This stores the current value of the file static STRUCT_PARSE_INFO
in *ENCLOSING_STRUCT_PARSE_INFO, and points STRUCT_PARSE_INFO at a
new c_struct_parse_info structure. The old value of
STRUCT_PARSE_INFO is restored in finish_struct. */
tree
start_struct (location_t loc, enum tree_code code, tree name,
struct c_struct_parse_info **enclosing_struct_parse_info)
{
/* If there is already a tag defined at this scope
(as a forward reference), just return it. */
tree ref = NULL_TREE;
location_t refloc = UNKNOWN_LOCATION;
if (name != NULL_TREE)
ref = lookup_tag (code, name, true, &refloc);
if (ref && TREE_CODE (ref) == code)
{
if (TYPE_STUB_DECL (ref))
refloc = DECL_SOURCE_LOCATION (TYPE_STUB_DECL (ref));
if (TYPE_SIZE (ref))
{
if (code == UNION_TYPE)
error_at (loc, "redefinition of %<union %E%>", name);
else
error_at (loc, "redefinition of %<struct %E%>", name);
if (refloc != UNKNOWN_LOCATION)
inform (refloc, "originally defined here");
/* Don't create structures using a name already in use. */
ref = NULL_TREE;
}
else if (C_TYPE_BEING_DEFINED (ref))
{
if (code == UNION_TYPE)
error_at (loc, "nested redefinition of %<union %E%>", name);
else
error_at (loc, "nested redefinition of %<struct %E%>", name);
/* Don't bother to report "originally defined here" for a
nested redefinition; the original definition should be
obvious. */
/* Don't create structures that contain themselves. */
ref = NULL_TREE;
}
}
/* Otherwise create a forward-reference just so the tag is in scope. */
if (ref == NULL_TREE || TREE_CODE (ref) != code)
{
ref = make_node (code);
pushtag (loc, name, ref);
}
C_TYPE_BEING_DEFINED (ref) = 1;
for (tree v = TYPE_MAIN_VARIANT (ref); v; v = TYPE_NEXT_VARIANT (v))
TYPE_PACKED (v) = flag_pack_struct;
*enclosing_struct_parse_info = struct_parse_info;
struct_parse_info = new c_struct_parse_info ();
/* FIXME: This will issue a warning for a use of a type defined
within a statement expr used within sizeof, et al.  This is not
terribly serious as C++ doesn't permit statement exprs within
sizeof anyhow. */
if (warn_cxx_compat && (in_sizeof || in_typeof || in_alignof))
warning_at (loc, OPT_Wc___compat,
"defining type in %qs expression is invalid in C++",
(in_sizeof
? "sizeof"
: (in_typeof ? "typeof" : "alignof")));
return ref;
}
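/* Sketches of the two error paths above:

	struct s { int i; };
	struct s { int j; };	-> "redefinition of struct s"
				   (TYPE_SIZE is already set)

	struct t { struct t { int i; } m; };
				-> "nested redefinition of struct t"
				   (C_TYPE_BEING_DEFINED is still set)

   In both cases REF is dropped, so the erroneous definition gets a
   fresh type node and cannot corrupt the original one.  */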
/* Process the specs, declarator and width (NULL if omitted)
of a structure component, returning a FIELD_DECL node.
WIDTH is non-NULL for bit-fields only, and is an INTEGER_CST node.
DECL_ATTRS is as for grokdeclarator.
LOC is the location of the structure component.
This is done during the parsing of the struct declaration.
The FIELD_DECL nodes are chained together and the lot of them
are ultimately passed to `finish_struct' to make the RECORD_TYPE node. */
tree
grokfield (location_t loc,
struct c_declarator *declarator, struct c_declspecs *declspecs,
tree width, tree *decl_attrs)
{
tree value;
if (declarator->kind == cdk_id && declarator->u.id == NULL_TREE
&& width == NULL_TREE)
{
/* This is an unnamed decl.
If we have something of the form "union { list } ;" then this
is the anonymous union extension. Similarly for struct.
If this is something of the form "struct foo;", then
If MS or Plan 9 extensions are enabled, this is handled as
an anonymous struct.
Otherwise this is a forward declaration of a structure tag.
If this is something of the form "foo;" and foo is a TYPE_DECL, then
If foo names a structure or union without a tag, then this
is an anonymous struct (this is permitted by C11).
If MS or Plan 9 extensions are enabled and foo names a
structure, then again this is an anonymous struct.
Otherwise this is an error.
Oh what a horrid tangled web we weave. I wonder if MS consciously
took this from Plan 9 or if it was an accident of implementation
that took root before someone noticed the bug... */
tree type = declspecs->type;
bool ok = false;
if (RECORD_OR_UNION_TYPE_P (type)
&& (flag_ms_extensions
|| flag_plan9_extensions
|| !declspecs->typedef_p))
{
if (flag_ms_extensions || flag_plan9_extensions)
ok = true;
else if (TYPE_NAME (type) == NULL)
ok = true;
else
ok = false;
}
if (!ok)
{
pedwarn (loc, 0, "declaration does not declare anything");
return NULL_TREE;
}
if (flag_isoc99)
pedwarn_c99 (loc, OPT_Wpedantic,
"ISO C99 doesn%'t support unnamed structs/unions");
else
pedwarn_c99 (loc, OPT_Wpedantic,
"ISO C90 doesn%'t support unnamed structs/unions");
}
value = grokdeclarator (declarator, declspecs, FIELD, false,
width ? &width : NULL, decl_attrs, NULL, NULL,
DEPRECATED_NORMAL);
finish_decl (value, loc, NULL_TREE, NULL_TREE, NULL_TREE);
DECL_INITIAL (value) = width;
if (width)
SET_DECL_C_BIT_FIELD (value);
if (warn_cxx_compat && DECL_NAME (value) != NULL_TREE)
{
/* If we currently have a binding for this field, set the
in_struct field in the binding, so that we warn about lookups
which find it. */
struct c_binding *b = I_SYMBOL_BINDING (DECL_NAME (value));
if (b != NULL)
{
/* If the in_struct field is not yet set, push it on a list
to be cleared when this struct is finished. */
if (!b->in_struct)
{
struct_parse_info->fields.safe_push (b);
b->in_struct = 1;
}
}
}
return value;
}
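/* Sketches of the unnamed-member cases sorted out above:

	struct a { union { int i; float f; }; };
				-> anonymous union member; valid C11,
				   pedwarned in C90/C99 pedantic modes

	typedef struct { int i; } T;
	struct b { T; };	-> accepted only with -fms-extensions or
				   -fplan9-extensions

	struct c { int; };	-> "declaration does not declare
				   anything"  */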
/* Subroutine of detect_field_duplicates: return whether X and Y,
which are both fields in the same struct, have duplicate field
names. */
static bool
is_duplicate_field (tree x, tree y)
{
if (DECL_NAME (x) != NULL_TREE && DECL_NAME (x) == DECL_NAME (y))
return true;
/* When using -fplan9-extensions, an anonymous field whose name is a
typedef can duplicate a field name. */
if (flag_plan9_extensions
&& (DECL_NAME (x) == NULL_TREE || DECL_NAME (y) == NULL_TREE))
{
tree xt, xn, yt, yn;
xt = TREE_TYPE (x);
if (DECL_NAME (x) != NULL_TREE)
xn = DECL_NAME (x);
else if (RECORD_OR_UNION_TYPE_P (xt)
&& TYPE_NAME (xt) != NULL_TREE
&& TREE_CODE (TYPE_NAME (xt)) == TYPE_DECL)
xn = DECL_NAME (TYPE_NAME (xt));
else
xn = NULL_TREE;
yt = TREE_TYPE (y);
if (DECL_NAME (y) != NULL_TREE)
yn = DECL_NAME (y);
else if (RECORD_OR_UNION_TYPE_P (yt)
&& TYPE_NAME (yt) != NULL_TREE
&& TREE_CODE (TYPE_NAME (yt)) == TYPE_DECL)
yn = DECL_NAME (TYPE_NAME (yt));
else
yn = NULL_TREE;
if (xn != NULL_TREE && xn == yn)
return true;
}
return false;
}
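/* Sketch of the -fplan9-extensions duplicate this function catches:

	typedef struct { int i; } x;
	struct s {
	  int x;	-> ordinary named field "x"
	  x;		-> anonymous field whose typedef name is also
			   "x": reported as a duplicate member
	};  */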
/* Subroutine of detect_field_duplicates: add the fields of FIELDLIST
to HTAB, giving errors for any duplicates. */
static void
detect_field_duplicates_hash (tree fieldlist,
hash_table<nofree_ptr_hash <tree_node> > *htab)
{
tree x, y;
tree_node **slot;
for (x = fieldlist; x ; x = DECL_CHAIN (x))
if ((y = DECL_NAME (x)) != NULL_TREE)
{
slot = htab->find_slot (y, INSERT);
if (*slot)
{
error ("duplicate member %q+D", x);
DECL_NAME (x) = NULL_TREE;
}
*slot = y;
}
else if (RECORD_OR_UNION_TYPE_P (TREE_TYPE (x)))
{
detect_field_duplicates_hash (TYPE_FIELDS (TREE_TYPE (x)), htab);
/* When using -fplan9-extensions, an anonymous field whose
name is a typedef can duplicate a field name. */
if (flag_plan9_extensions
&& TYPE_NAME (TREE_TYPE (x)) != NULL_TREE
&& TREE_CODE (TYPE_NAME (TREE_TYPE (x))) == TYPE_DECL)
{
tree xn = DECL_NAME (TYPE_NAME (TREE_TYPE (x)));
slot = htab->find_slot (xn, INSERT);
if (*slot)
error ("duplicate member %q+D", TYPE_NAME (TREE_TYPE (x)));
*slot = xn;
}
}
}
/* Generate an error for any duplicate field names in FIELDLIST. Munge
the list such that this does not present a problem later. */
static void
detect_field_duplicates (tree fieldlist)
{
tree x, y;
int timeout = 10;
/* If the struct is the list of instance variables of an Objective-C
class, then we need to check all the instance variables of
superclasses when checking for duplicates (since you can't have
an instance variable in a subclass with the same name as an
instance variable in a superclass). We pass on this job to the
Objective-C compiler. objc_detect_field_duplicates() will return
false if we are not checking the list of instance variables and
the C frontend should proceed with the standard field duplicate
checks. If we are checking the list of instance variables, the
ObjC frontend will do the check, emit the errors if needed, and
then return true. */
if (c_dialect_objc ())
if (objc_detect_field_duplicates (false))
return;
/* First, see if there are more than "a few" fields.
This is trivially true if there are zero or one fields. */
if (!fieldlist || !DECL_CHAIN (fieldlist))
return;
x = fieldlist;
do {
timeout--;
if (DECL_NAME (x) == NULL_TREE
&& RECORD_OR_UNION_TYPE_P (TREE_TYPE (x)))
timeout = 0;
x = DECL_CHAIN (x);
} while (timeout > 0 && x);
/* If there were "few" fields and no anonymous structures or unions,
avoid the overhead of allocating a hash table. Instead just do
the nested traversal thing. */
if (timeout > 0)
{
for (x = DECL_CHAIN (fieldlist); x; x = DECL_CHAIN (x))
/* When using -fplan9-extensions, we can have duplicates
between typedef names and fields. */
if (DECL_NAME (x)
|| (flag_plan9_extensions
&& DECL_NAME (x) == NULL_TREE
&& RECORD_OR_UNION_TYPE_P (TREE_TYPE (x))
&& TYPE_NAME (TREE_TYPE (x)) != NULL_TREE
&& TREE_CODE (TYPE_NAME (TREE_TYPE (x))) == TYPE_DECL))
{
for (y = fieldlist; y != x; y = TREE_CHAIN (y))
if (is_duplicate_field (y, x))
{
error ("duplicate member %q+D", x);
DECL_NAME (x) = NULL_TREE;
}
}
}
else
{
hash_table<nofree_ptr_hash <tree_node> > htab (37);
detect_field_duplicates_hash (fieldlist, &htab);
}
}
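/* The choice above is a size cutoff: short field lists (the common
   case) are checked with the quadratic pairwise scan, which is cheaper
   than setting up a table, while lists with roughly ten or more
   leading fields, or with any anonymous struct/union member, pay for
   one hash table and a single recursive pass instead.  */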
/* Finish up struct info used by -Wc++-compat. */
static void
warn_cxx_compat_finish_struct (tree fieldlist, enum tree_code code,
location_t record_loc)
{
unsigned int ix;
tree x;
struct c_binding *b;
if (fieldlist == NULL_TREE)
{
if (code == RECORD_TYPE)
warning_at (record_loc, OPT_Wc___compat,
"empty struct has size 0 in C, size 1 in C++");
else
warning_at (record_loc, OPT_Wc___compat,
"empty union has size 0 in C, size 1 in C++");
}
/* Set the C_TYPE_DEFINED_IN_STRUCT flag for each type defined in
the current struct. We do this now at the end of the struct
because the flag is used to issue visibility warnings, and we
only want to issue those warnings if the type is referenced
outside of the struct declaration. */
FOR_EACH_VEC_ELT (struct_parse_info->struct_types, ix, x)
C_TYPE_DEFINED_IN_STRUCT (x) = 1;
/* The TYPEDEFS_SEEN field of STRUCT_PARSE_INFO is a list of
typedefs used when declaring fields in this struct. If the name
of any of the fields is also a typedef name then the struct would
not parse in C++, because the C++ lookup rules say that the
typedef name would be looked up in the context of the struct, and
would thus be the field rather than the typedef. */
if (!struct_parse_info->typedefs_seen.is_empty ()
&& fieldlist != NULL_TREE)
{
/* Use a hash_set<tree> using the name of the typedef. We can use
a hash_set<tree> because identifiers are interned. */
hash_set<tree> tset;
FOR_EACH_VEC_ELT (struct_parse_info->typedefs_seen, ix, x)
tset.add (DECL_NAME (x));
for (x = fieldlist; x != NULL_TREE; x = DECL_CHAIN (x))
{
if (DECL_NAME (x) != NULL_TREE
&& tset.contains (DECL_NAME (x)))
{
warning_at (DECL_SOURCE_LOCATION (x), OPT_Wc___compat,
("using %qD as both field and typedef name is "
"invalid in C++"),
x);
/* FIXME: It would be nice to report the location where
the typedef name is used. */
}
}
}
/* For each field which has a binding and which was not defined in
an enclosing struct, clear the in_struct field. */
FOR_EACH_VEC_ELT (struct_parse_info->fields, ix, b)
b->in_struct = 0;
}
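/* Sketch of the C++ lookup hazard diagnosed above:

	typedef int T;
	struct s { T T; };	-> valid C, but in C++ the field T would
				   hide the typedef inside the struct, so
				   "T" could no longer name the type
				   there; warned under -Wc++-compat  */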
/* Function to help qsort sort FIELD_DECLs by name order. */
static int
field_decl_cmp (const void *x_p, const void *y_p)
{
const tree *const x = (const tree *) x_p;
const tree *const y = (const tree *) y_p;
if (DECL_NAME (*x) == DECL_NAME (*y))
/* A nontype is "greater" than a type. */
return (TREE_CODE (*y) == TYPE_DECL) - (TREE_CODE (*x) == TYPE_DECL);
if (DECL_NAME (*x) == NULL_TREE)
return -1;
if (DECL_NAME (*y) == NULL_TREE)
return 1;
if (DECL_NAME (*x) < DECL_NAME (*y))
return -1;
return 1;
}
/* Fill in the fields of a RECORD_TYPE or UNION_TYPE node, T.
LOC is the location of the RECORD_TYPE or UNION_TYPE's definition.
FIELDLIST is a chain of FIELD_DECL nodes for the fields.
ATTRIBUTES are attributes to be applied to the structure.
ENCLOSING_STRUCT_PARSE_INFO is the value of STRUCT_PARSE_INFO when
the struct was started. */
tree
finish_struct (location_t loc, tree t, tree fieldlist, tree attributes,
struct c_struct_parse_info *enclosing_struct_parse_info)
{
tree x;
bool toplevel = file_scope == current_scope;
/* If this type was previously laid out as a forward reference,
make sure we lay it out again. */
TYPE_SIZE (t) = NULL_TREE;
decl_attributes (&t, attributes, (int) ATTR_FLAG_TYPE_IN_PLACE);
if (pedantic)
{
for (x = fieldlist; x; x = DECL_CHAIN (x))
{
if (DECL_NAME (x) != NULL_TREE)
break;
if (flag_isoc11 && RECORD_OR_UNION_TYPE_P (TREE_TYPE (x)))
break;
}
if (x == NULL_TREE)
{
if (TREE_CODE (t) == UNION_TYPE)
{
if (fieldlist)
pedwarn (loc, OPT_Wpedantic, "union has no named members");
else
pedwarn (loc, OPT_Wpedantic, "union has no members");
}
else
{
if (fieldlist)
pedwarn (loc, OPT_Wpedantic, "struct has no named members");
else
pedwarn (loc, OPT_Wpedantic, "struct has no members");
}
}
}
/* Install struct as DECL_CONTEXT of each field decl.
Also process specified field sizes, found in the DECL_INITIAL,
storing 0 there after the type has been changed to precision equal
to its width, rather than the precision of the specified standard
type. (Correct layout requires the original type to have been preserved
until now.) */
bool saw_named_field = false;
for (x = fieldlist; x; x = DECL_CHAIN (x))
{
if (TREE_TYPE (x) == error_mark_node)
continue;
DECL_CONTEXT (x) = t;
/* If any field is const, the structure type is pseudo-const. */
if (TREE_READONLY (x))
C_TYPE_FIELDS_READONLY (t) = 1;
else
{
/* A field that is pseudo-const makes the structure likewise. */
tree t1 = strip_array_types (TREE_TYPE (x));
if (RECORD_OR_UNION_TYPE_P (t1) && C_TYPE_FIELDS_READONLY (t1))
C_TYPE_FIELDS_READONLY (t) = 1;
}
/* Any field that is volatile means variables of this type must be
treated in some ways as volatile. */
if (TREE_THIS_VOLATILE (x))
C_TYPE_FIELDS_VOLATILE (t) = 1;
/* Any field of nominal variable size implies structure is too. */
if (C_DECL_VARIABLE_SIZE (x))
C_TYPE_VARIABLE_SIZE (t) = 1;
if (DECL_C_BIT_FIELD (x))
{
unsigned HOST_WIDE_INT width = tree_to_uhwi (DECL_INITIAL (x));
DECL_SIZE (x) = bitsize_int (width);
DECL_BIT_FIELD (x) = 1;
}
if (TYPE_PACKED (t)
&& (DECL_BIT_FIELD (x)
|| TYPE_ALIGN (TREE_TYPE (x)) > BITS_PER_UNIT))
DECL_PACKED (x) = 1;
/* Detect flexible array member in an invalid context. */
if (TREE_CODE (TREE_TYPE (x)) == ARRAY_TYPE
&& TYPE_SIZE (TREE_TYPE (x)) == NULL_TREE
&& TYPE_DOMAIN (TREE_TYPE (x)) != NULL_TREE
&& TYPE_MAX_VALUE (TYPE_DOMAIN (TREE_TYPE (x))) == NULL_TREE)
{
if (TREE_CODE (t) == UNION_TYPE)
{
error_at (DECL_SOURCE_LOCATION (x),
"flexible array member in union");
TREE_TYPE (x) = error_mark_node;
}
else if (DECL_CHAIN (x) != NULL_TREE)
{
error_at (DECL_SOURCE_LOCATION (x),
"flexible array member not at end of struct");
TREE_TYPE (x) = error_mark_node;
}
else if (!saw_named_field)
{
error_at (DECL_SOURCE_LOCATION (x),
"flexible array member in a struct with no named "
"members");
TREE_TYPE (x) = error_mark_node;
}
}
if (pedantic && TREE_CODE (t) == RECORD_TYPE
&& flexible_array_type_p (TREE_TYPE (x)))
pedwarn (DECL_SOURCE_LOCATION (x), OPT_Wpedantic,
"invalid use of structure with flexible array member");
if (DECL_NAME (x)
|| RECORD_OR_UNION_TYPE_P (TREE_TYPE (x)))
saw_named_field = true;
}
detect_field_duplicates (fieldlist);
/* Now we have the nearly final fieldlist. Record it,
then lay out the structure or union (including the fields). */
TYPE_FIELDS (t) = fieldlist;
maybe_apply_pragma_scalar_storage_order (t);
layout_type (t);
if (TYPE_SIZE_UNIT (t)
&& TREE_CODE (TYPE_SIZE_UNIT (t)) == INTEGER_CST
&& !TREE_OVERFLOW (TYPE_SIZE_UNIT (t))
&& !valid_constant_size_p (TYPE_SIZE_UNIT (t)))
error ("type %qT is too large", t);
/* Give bit-fields their proper types and rewrite the type of array fields
with scalar component if the enclosing type has reverse storage order. */
for (tree field = fieldlist; field; field = DECL_CHAIN (field))
{
if (TREE_CODE (field) == FIELD_DECL
&& DECL_INITIAL (field)
&& TREE_TYPE (field) != error_mark_node)
{
unsigned HOST_WIDE_INT width
= tree_to_uhwi (DECL_INITIAL (field));
tree type = TREE_TYPE (field);
if (width != TYPE_PRECISION (type))
{
TREE_TYPE (field)
= c_build_bitfield_integer_type (width, TYPE_UNSIGNED (type));
SET_DECL_MODE (field, TYPE_MODE (TREE_TYPE (field)));
}
DECL_INITIAL (field) = NULL_TREE;
}
else if (TYPE_REVERSE_STORAGE_ORDER (t)
&& TREE_CODE (field) == FIELD_DECL
&& TREE_CODE (TREE_TYPE (field)) == ARRAY_TYPE)
{
tree ftype = TREE_TYPE (field);
tree ctype = strip_array_types (ftype);
if (!RECORD_OR_UNION_TYPE_P (ctype) && TYPE_MODE (ctype) != QImode)
{
tree fmain_type = TYPE_MAIN_VARIANT (ftype);
tree *typep = &fmain_type;
do {
*typep = build_distinct_type_copy (*typep);
TYPE_REVERSE_STORAGE_ORDER (*typep) = 1;
typep = &TREE_TYPE (*typep);
} while (TREE_CODE (*typep) == ARRAY_TYPE);
TREE_TYPE (field)
= c_build_qualified_type (fmain_type, TYPE_QUALS (ftype));
}
}
}
/* Now we have the truly final field list.
Store it in this type and in the variants. */
TYPE_FIELDS (t) = fieldlist;
/* If there are lots of fields, sort so we can look through them fast.
We arbitrarily consider 16 or more elts to be "a lot". */
{
int len = 0;
for (x = fieldlist; x; x = DECL_CHAIN (x))
{
if (len > 15 || DECL_NAME (x) == NULL)
break;
len += 1;
}
if (len > 15)
{
tree *field_array;
struct lang_type *space;
struct sorted_fields_type *space2;
len += list_length (x);
/* Use the same allocation policy here that make_node uses, to
ensure that this lives as long as the rest of the struct decl.
All decls in an inline function need to be saved. */
space = ggc_cleared_alloc<struct lang_type> ();
space2 = (sorted_fields_type *) ggc_internal_alloc
(sizeof (struct sorted_fields_type) + len * sizeof (tree));
len = 0;
space->s = space2;
field_array = &space2->elts[0];
for (x = fieldlist; x; x = DECL_CHAIN (x))
{
field_array[len++] = x;
/* If there is an anonymous struct or union, break out of the loop. */
if (DECL_NAME (x) == NULL)
break;
}
/* Found no anonymous struct/union. Add the TYPE_LANG_SPECIFIC. */
if (x == NULL)
{
TYPE_LANG_SPECIFIC (t) = space;
TYPE_LANG_SPECIFIC (t)->s->len = len;
field_array = TYPE_LANG_SPECIFIC (t)->s->elts;
qsort (field_array, len, sizeof (tree), field_decl_cmp);
}
}
}
/* Note: C_TYPE_INCOMPLETE_VARS overloads TYPE_VFIELD which is used
in dwarf2out via rest_of_decl_compilation below and means
something totally different. Since we will be clearing
C_TYPE_INCOMPLETE_VARS shortly after we iterate through them,
clear it ahead of time and avoid problems in dwarf2out. Ideally,
C_TYPE_INCOMPLETE_VARS should use some language specific
node. */
tree incomplete_vars = C_TYPE_INCOMPLETE_VARS (TYPE_MAIN_VARIANT (t));
for (x = TYPE_MAIN_VARIANT (t); x; x = TYPE_NEXT_VARIANT (x))
{
TYPE_FIELDS (x) = TYPE_FIELDS (t);
TYPE_LANG_SPECIFIC (x) = TYPE_LANG_SPECIFIC (t);
C_TYPE_FIELDS_READONLY (x) = C_TYPE_FIELDS_READONLY (t);
C_TYPE_FIELDS_VOLATILE (x) = C_TYPE_FIELDS_VOLATILE (t);
C_TYPE_VARIABLE_SIZE (x) = C_TYPE_VARIABLE_SIZE (t);
C_TYPE_INCOMPLETE_VARS (x) = NULL_TREE;
}
/* If this was supposed to be a transparent union, but we can't
make it one, warn and turn off the flag. */
if (TREE_CODE (t) == UNION_TYPE
&& TYPE_TRANSPARENT_AGGR (t)
&& (!TYPE_FIELDS (t) || TYPE_MODE (t) != DECL_MODE (TYPE_FIELDS (t))))
{
TYPE_TRANSPARENT_AGGR (t) = 0;
warning_at (loc, 0, "union cannot be made transparent");
}
/* Update type location to the one of the definition, instead of e.g.
a forward declaration. */
if (TYPE_STUB_DECL (t))
DECL_SOURCE_LOCATION (TYPE_STUB_DECL (t)) = loc;
/* Finish debugging output for this type. */
rest_of_type_compilation (t, toplevel);
/* If this structure or union completes the type of any previous
variable declaration, lay it out and output its rtl. */
for (x = incomplete_vars; x; x = TREE_CHAIN (x))
{
tree decl = TREE_VALUE (x);
if (TREE_CODE (TREE_TYPE (decl)) == ARRAY_TYPE)
layout_array_type (TREE_TYPE (decl));
if (TREE_CODE (decl) != TYPE_DECL)
{
layout_decl (decl, 0);
if (c_dialect_objc ())
objc_check_decl (decl);
rest_of_decl_compilation (decl, toplevel, 0);
}
}
/* If we're inside a function proper, i.e. not file-scope and not still
parsing parameters, then arrange for the size of a variable sized type
to be bound now. */
if (building_stmt_list_p () && variably_modified_type_p (t, NULL_TREE))
add_stmt (build_stmt (loc,
DECL_EXPR, build_decl (loc, TYPE_DECL, NULL, t)));
if (warn_cxx_compat)
warn_cxx_compat_finish_struct (fieldlist, TREE_CODE (t), loc);
delete struct_parse_info;
struct_parse_info = enclosing_struct_parse_info;
/* If this struct is defined inside a struct, add it to
struct_types. */
if (warn_cxx_compat
&& struct_parse_info != NULL
&& !in_sizeof && !in_typeof && !in_alignof)
struct_parse_info->struct_types.safe_push (t);
return t;
}
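/* Sketches of the flexible-array-member checks in finish_struct:

	struct ok  { int n; int a[]; };	-> valid C99 flexible array member
	union u    { int a[]; };	-> "flexible array member in union"
	struct bad { int a[]; int n; };	-> "flexible array member not at
					   end of struct"
	struct one { int a[]; };	-> "flexible array member in a
					   struct with no named members"
					   (the check precedes the update
					   of saw_named_field)  */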
static struct {
gt_pointer_operator new_value;
void *cookie;
} resort_data;
/* This routine compares two fields like field_decl_cmp but using the
pointer operator in resort_data. */
static int
resort_field_decl_cmp (const void *x_p, const void *y_p)
{
const tree *const x = (const tree *) x_p;
const tree *const y = (const tree *) y_p;
if (DECL_NAME (*x) == DECL_NAME (*y))
/* A nontype is "greater" than a type. */
return (TREE_CODE (*y) == TYPE_DECL) - (TREE_CODE (*x) == TYPE_DECL);
if (DECL_NAME (*x) == NULL_TREE)
return -1;
if (DECL_NAME (*y) == NULL_TREE)
return 1;
{
tree d1 = DECL_NAME (*x);
tree d2 = DECL_NAME (*y);
resort_data.new_value (&d1, resort_data.cookie);
resort_data.new_value (&d2, resort_data.cookie);
if (d1 < d2)
return -1;
}
return 1;
}
/* Resort DECL_SORTED_FIELDS because pointers have been reordered. */
void
resort_sorted_fields (void *obj,
void * ARG_UNUSED (orig_obj),
gt_pointer_operator new_value,
void *cookie)
{
struct sorted_fields_type *sf = (struct sorted_fields_type *) obj;
resort_data.new_value = new_value;
resort_data.cookie = cookie;
qsort (&sf->elts[0], sf->len, sizeof (tree),
resort_field_decl_cmp);
}
/* Lay out the type T, and its element type, and so on. */
static void
layout_array_type (tree t)
{
if (TREE_CODE (TREE_TYPE (t)) == ARRAY_TYPE)
layout_array_type (TREE_TYPE (t));
layout_type (t);
}
/* Begin compiling the definition of an enumeration type.
NAME is its name (or null if anonymous).
LOC is the enum's location.
Returns the type object, as yet incomplete.
Also records info about it so that build_enumerator
may be used to declare the individual values as they are read. */
tree
start_enum (location_t loc, struct c_enum_contents *the_enum, tree name)
{
tree enumtype = NULL_TREE;
location_t enumloc = UNKNOWN_LOCATION;
/* If this is the real definition for a previous forward reference,
fill in the contents in the same object that used to be the
forward reference. */
if (name != NULL_TREE)
enumtype = lookup_tag (ENUMERAL_TYPE, name, true, &enumloc);
if (enumtype == NULL_TREE || TREE_CODE (enumtype) != ENUMERAL_TYPE)
{
enumtype = make_node (ENUMERAL_TYPE);
pushtag (loc, name, enumtype);
}
/* Update type location to the one of the definition, instead of e.g.
a forward declaration. */
else if (TYPE_STUB_DECL (enumtype))
{
enumloc = DECL_SOURCE_LOCATION (TYPE_STUB_DECL (enumtype));
DECL_SOURCE_LOCATION (TYPE_STUB_DECL (enumtype)) = loc;
}
if (C_TYPE_BEING_DEFINED (enumtype))
error_at (loc, "nested redefinition of %<enum %E%>", name);
C_TYPE_BEING_DEFINED (enumtype) = 1;
if (TYPE_VALUES (enumtype) != NULL_TREE)
{
/* This enum is a named one that has been declared already. */
error_at (loc, "redeclaration of %<enum %E%>", name);
if (enumloc != UNKNOWN_LOCATION)
inform (enumloc, "originally defined here");
/* Completely replace its old definition.
The old enumerators remain defined, however. */
TYPE_VALUES (enumtype) = NULL_TREE;
}
the_enum->enum_next_value = integer_zero_node;
the_enum->enum_overflow = 0;
if (flag_short_enums)
for (tree v = TYPE_MAIN_VARIANT (enumtype); v; v = TYPE_NEXT_VARIANT (v))
TYPE_PACKED (v) = 1;
/* FIXME: This will issue a warning for a use of a type defined
within sizeof in a statement expr. This is not terribly serious
as C++ doesn't permit statement exprs within sizeof anyhow. */
if (warn_cxx_compat && (in_sizeof || in_typeof || in_alignof))
warning_at (loc, OPT_Wc___compat,
"defining type in %qs expression is invalid in C++",
(in_sizeof
? "sizeof"
: (in_typeof ? "typeof" : "alignof")));
return enumtype;
}
/* After processing and defining all the values of an enumeration type,
install their decls in the enumeration type and finish it off.
ENUMTYPE is the type object, VALUES a list of decl-value pairs,
and ATTRIBUTES are the specified attributes.
Returns ENUMTYPE. */
tree
finish_enum (tree enumtype, tree values, tree attributes)
{
tree pair, tem;
tree minnode = NULL_TREE, maxnode = NULL_TREE;
int precision;
signop sign;
bool toplevel = (file_scope == current_scope);
struct lang_type *lt;
decl_attributes (&enumtype, attributes, (int) ATTR_FLAG_TYPE_IN_PLACE);
/* Calculate the maximum value of any enumerator in this type. */
if (values == error_mark_node)
minnode = maxnode = integer_zero_node;
else
{
minnode = maxnode = TREE_VALUE (values);
for (pair = TREE_CHAIN (values); pair; pair = TREE_CHAIN (pair))
{
tree value = TREE_VALUE (pair);
if (tree_int_cst_lt (maxnode, value))
maxnode = value;
if (tree_int_cst_lt (value, minnode))
minnode = value;
}
}
/* Construct the final type of this enumeration. It is the same
as one of the integral types - the narrowest one that fits, except
that normally we only go as narrow as int - and signed iff any of
the values are negative. */
sign = (tree_int_cst_sgn (minnode) >= 0) ? UNSIGNED : SIGNED;
precision = MAX (tree_int_cst_min_precision (minnode, sign),
tree_int_cst_min_precision (maxnode, sign));
/* If the precision of the type was specified with an attribute and it
was too small, give an error. Otherwise, use it. */
if (TYPE_PRECISION (enumtype) && lookup_attribute ("mode", attributes))
{
if (precision > TYPE_PRECISION (enumtype))
{
TYPE_PRECISION (enumtype) = 0;
error ("specified mode too small for enumeral values");
}
else
precision = TYPE_PRECISION (enumtype);
}
else
TYPE_PRECISION (enumtype) = 0;
if (TYPE_PACKED (enumtype)
|| precision > TYPE_PRECISION (integer_type_node)
|| TYPE_PRECISION (enumtype))
{
tem = c_common_type_for_size (precision, sign == UNSIGNED ? 1 : 0);
if (tem == NULL)
{
warning (0, "enumeration values exceed range of largest integer");
tem = long_long_integer_type_node;
}
}
else
tem = sign == UNSIGNED ? unsigned_type_node : integer_type_node;
TYPE_MIN_VALUE (enumtype) = TYPE_MIN_VALUE (tem);
TYPE_MAX_VALUE (enumtype) = TYPE_MAX_VALUE (tem);
TYPE_UNSIGNED (enumtype) = TYPE_UNSIGNED (tem);
SET_TYPE_ALIGN (enumtype, TYPE_ALIGN (tem));
TYPE_SIZE (enumtype) = NULL_TREE;
TYPE_PRECISION (enumtype) = TYPE_PRECISION (tem);
layout_type (enumtype);
if (values != error_mark_node)
{
/* Change the type of the enumerators to be the enum type. We
need to do this irrespective of the size of the enum, for
proper type checking. Replace the DECL_INITIALs of the
enumerators, and the value slots of the list, with copies
that have the enum type; they cannot be modified in place
because they may be shared (e.g. integer_zero_node).  Finally,
change the purpose slots to point to the names of the decls. */
for (pair = values; pair; pair = TREE_CHAIN (pair))
{
tree enu = TREE_PURPOSE (pair);
tree ini = DECL_INITIAL (enu);
TREE_TYPE (enu) = enumtype;
/* The ISO C Standard mandates enumerators to have type int,
even though the underlying type of an enum type is
unspecified. However, GCC allows enumerators of any
integer type as an extension.  build_enumerator()
converts any enumerators that fit in an int to type int,
to avoid promotions to unsigned types when comparing
integers with enumerators that fit in the int range.
When -pedantic is given, build_enumerator() would have
already warned about those that don't fit. Here we
convert the rest to the enumerator type. */
if (TREE_TYPE (ini) != integer_type_node)
ini = convert (enumtype, ini);
DECL_INITIAL (enu) = ini;
TREE_PURPOSE (pair) = DECL_NAME (enu);
TREE_VALUE (pair) = ini;
}
TYPE_VALUES (enumtype) = values;
}
/* Record the min/max values so that we can warn about bit-field
enumerations that are too small for the values. */
lt = ggc_cleared_alloc<struct lang_type> ();
lt->enum_min = minnode;
lt->enum_max = maxnode;
TYPE_LANG_SPECIFIC (enumtype) = lt;
/* Fix up all variant types of this enum type. */
for (tem = TYPE_MAIN_VARIANT (enumtype); tem; tem = TYPE_NEXT_VARIANT (tem))
{
if (tem == enumtype)
continue;
TYPE_VALUES (tem) = TYPE_VALUES (enumtype);
TYPE_MIN_VALUE (tem) = TYPE_MIN_VALUE (enumtype);
TYPE_MAX_VALUE (tem) = TYPE_MAX_VALUE (enumtype);
TYPE_SIZE (tem) = TYPE_SIZE (enumtype);
TYPE_SIZE_UNIT (tem) = TYPE_SIZE_UNIT (enumtype);
SET_TYPE_MODE (tem, TYPE_MODE (enumtype));
TYPE_PRECISION (tem) = TYPE_PRECISION (enumtype);
SET_TYPE_ALIGN (tem, TYPE_ALIGN (enumtype));
TYPE_USER_ALIGN (tem) = TYPE_USER_ALIGN (enumtype);
TYPE_UNSIGNED (tem) = TYPE_UNSIGNED (enumtype);
TYPE_LANG_SPECIFIC (tem) = TYPE_LANG_SPECIFIC (enumtype);
}
/* Finish debugging output for this type. */
rest_of_type_compilation (enumtype, toplevel);
/* If this enum is defined inside a struct, add it to
struct_types. */
if (warn_cxx_compat
&& struct_parse_info != NULL
&& !in_sizeof && !in_typeof && !in_alignof)
struct_parse_info->struct_types.safe_push (enumtype);
return enumtype;
}
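/* Rough sketch of the underlying-type selection above, assuming a
   typical target with 32-bit int:

	enum e1 { A = 0, B = 200 };	-> plain int: by default the type
					   is never made narrower than int
	enum e2 { C = 0, D = 200 } __attribute__ ((packed));
					-> an 8-bit integer type, since 8
					   bits of precision suffice
					   (-fshort-enums behaves the same
					   way via TYPE_PACKED)  */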
/* Build and install a CONST_DECL for one value of the
current enumeration type (one that was begun with start_enum).
DECL_LOC is the location of the enumerator.
LOC is the location of the '=' operator if any, DECL_LOC otherwise.
Return a tree-list containing the CONST_DECL and its value.
Assignment of sequential values by default is handled here. */
tree
build_enumerator (location_t decl_loc, location_t loc,
struct c_enum_contents *the_enum, tree name, tree value)
{
tree decl, type;
/* Validate and default VALUE. */
if (value != NULL_TREE)
{
/* Don't issue more errors for error_mark_node (i.e. an
undeclared identifier) - just ignore the value expression. */
if (value == error_mark_node)
value = NULL_TREE;
else if (!INTEGRAL_TYPE_P (TREE_TYPE (value)))
{
error_at (loc, "enumerator value for %qE is not an integer constant",
name);
value = NULL_TREE;
}
else
{
if (TREE_CODE (value) != INTEGER_CST)
{
value = c_fully_fold (value, false, NULL);
if (TREE_CODE (value) == INTEGER_CST)
pedwarn (loc, OPT_Wpedantic,
"enumerator value for %qE is not an integer "
"constant expression", name);
}
if (TREE_CODE (value) != INTEGER_CST)
{
error ("enumerator value for %qE is not an integer constant",
name);
value = NULL_TREE;
}
else
{
value = default_conversion (value);
constant_expression_warning (value);
}
}
}
/* Default based on previous value. */
/* It should no longer be possible to have NON_LVALUE_EXPR
in the default. */
if (value == NULL_TREE)
{
value = the_enum->enum_next_value;
if (the_enum->enum_overflow)
error_at (loc, "overflow in enumeration values");
}
/* Even though the underlying type of an enum is unspecified, the
type of enumeration constants is explicitly defined as int
(6.4.4.3/2 in the C99 Standard). GCC allows any integer type as
an extension. */
else if (!int_fits_type_p (value, integer_type_node))
pedwarn (loc, OPT_Wpedantic,
"ISO C restricts enumerator values to range of %<int%>");
/* The ISO C Standard mandates enumerators to have type int, even
though the underlying type of an enum type is unspecified.
However, GCC allows enumerators of any integer type as an
extension.  Here we convert any enumerators that fit in an int
to type int, to avoid promotions to unsigned types when comparing
integers with enumerators that fit in the int range. When
-pedantic is given, we would have already warned about those that
don't fit. We have to do this here rather than in finish_enum
because this value may be used to define more enumerators. */
if (int_fits_type_p (value, integer_type_node))
value = convert (integer_type_node, value);
/* Set basis for default for next value. */
the_enum->enum_next_value
= build_binary_op (EXPR_LOC_OR_LOC (value, input_location),
PLUS_EXPR, value, integer_one_node, false);
the_enum->enum_overflow = tree_int_cst_lt (the_enum->enum_next_value, value);
/* Now create a declaration for the enum value name. */
type = TREE_TYPE (value);
type = c_common_type_for_size (MAX (TYPE_PRECISION (type),
TYPE_PRECISION (integer_type_node)),
(TYPE_PRECISION (type)
>= TYPE_PRECISION (integer_type_node)
&& TYPE_UNSIGNED (type)));
decl = build_decl (decl_loc, CONST_DECL, name, type);
DECL_INITIAL (decl) = convert (type, value);
pushdecl (decl);
return tree_cons (decl, value, NULL_TREE);
}
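/* Sketch of the sequential defaulting handled above:

	enum e { A, B, C = 10, D };	-> A == 0, B == 1, C == 10, D == 11

   Each enumerator leaves enum_next_value == value + 1 behind, and an
   explicit "= 10" merely resets that running counter; values that do
   not fit in int draw the -Wpedantic diagnostic above.  */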
/* Create the FUNCTION_DECL for a function definition.
DECLSPECS, DECLARATOR and ATTRIBUTES are the parts of
the declaration; they describe the function's name and the type it returns,
but twisted together in a fashion that parallels the syntax of C.
This function creates a binding context for the function body
as well as setting up the FUNCTION_DECL in current_function_decl.
Returns true on success. If the DECLARATOR is not suitable for a function
(it defines a datum instead), we return false to report a parse error. */
bool
start_function (struct c_declspecs *declspecs, struct c_declarator *declarator,
tree attributes)
{
tree decl1, old_decl;
tree restype, resdecl;
location_t loc;
current_function_returns_value = 0; /* Assume, until we see it does. */
current_function_returns_null = 0;
current_function_returns_abnormally = 0;
warn_about_return_type = 0;
c_switch_stack = NULL;
/* Indicate no valid break/continue context by setting these variables
to some non-null, non-label value. We'll notice and emit the proper
error message in c_finish_bc_stmt. */
c_break_label = c_cont_label = size_zero_node;
decl1 = grokdeclarator (declarator, declspecs, FUNCDEF, true, NULL,
&attributes, NULL, NULL, DEPRECATED_NORMAL);
invoke_plugin_callbacks (PLUGIN_START_PARSE_FUNCTION, decl1);
/* If the declarator is not suitable for a function definition,
cause a syntax error. */
if (decl1 == NULL_TREE
|| TREE_CODE (decl1) != FUNCTION_DECL)
return false;
loc = DECL_SOURCE_LOCATION (decl1);
c_decl_attributes (&decl1, attributes, 0);
if (DECL_DECLARED_INLINE_P (decl1)
&& DECL_UNINLINABLE (decl1)
&& lookup_attribute ("noinline", DECL_ATTRIBUTES (decl1)))
warning_at (loc, OPT_Wattributes,
"inline function %qD given attribute noinline",
decl1);
/* Handle gnu_inline attribute. */
if (declspecs->inline_p
&& !flag_gnu89_inline
&& TREE_CODE (decl1) == FUNCTION_DECL
&& (lookup_attribute ("gnu_inline", DECL_ATTRIBUTES (decl1))
|| current_function_decl))
{
if (declspecs->storage_class != csc_static)
DECL_EXTERNAL (decl1) = !DECL_EXTERNAL (decl1);
}
announce_function (decl1);
if (!COMPLETE_OR_VOID_TYPE_P (TREE_TYPE (TREE_TYPE (decl1))))
{
error_at (loc, "return type is an incomplete type");
/* Make it return void instead. */
TREE_TYPE (decl1)
= build_function_type (void_type_node,
TYPE_ARG_TYPES (TREE_TYPE (decl1)));
}
if (warn_about_return_type)
warn_defaults_to (loc, flag_isoc99 ? OPT_Wimplicit_int
: (warn_return_type ? OPT_Wreturn_type
: OPT_Wimplicit_int),
"return type defaults to %<int%>");
/* Make the init_value nonzero so pushdecl knows this is not tentative.
error_mark_node is replaced below (in pop_scope) with the BLOCK. */
DECL_INITIAL (decl1) = error_mark_node;
/* A nested function is not global. */
if (current_function_decl != NULL_TREE)
TREE_PUBLIC (decl1) = 0;
/* If this definition isn't a prototype and we had a prototype declaration
before, copy the arg type info from that prototype. */
old_decl = lookup_name_in_scope (DECL_NAME (decl1), current_scope);
if (old_decl && TREE_CODE (old_decl) != FUNCTION_DECL)
old_decl = NULL_TREE;
current_function_prototype_locus = UNKNOWN_LOCATION;
current_function_prototype_built_in = false;
current_function_prototype_arg_types = NULL_TREE;
if (!prototype_p (TREE_TYPE (decl1)))
{
if (old_decl != NULL_TREE
&& TREE_CODE (TREE_TYPE (old_decl)) == FUNCTION_TYPE
&& comptypes (TREE_TYPE (TREE_TYPE (decl1)),
TREE_TYPE (TREE_TYPE (old_decl))))
{
if (stdarg_p (TREE_TYPE (old_decl)))
{
warning_at (loc, 0, "%q+D defined as variadic function "
"without prototype", decl1);
locate_old_decl (old_decl);
}
TREE_TYPE (decl1) = composite_type (TREE_TYPE (old_decl),
TREE_TYPE (decl1));
current_function_prototype_locus = DECL_SOURCE_LOCATION (old_decl);
current_function_prototype_built_in
= C_DECL_BUILTIN_PROTOTYPE (old_decl);
current_function_prototype_arg_types
= TYPE_ARG_TYPES (TREE_TYPE (decl1));
}
if (TREE_PUBLIC (decl1))
{
/* If there is an external prototype declaration of this
function, record its location but do not copy information
to this decl. This may be an invisible declaration
(built-in or in a scope which has finished) or simply
have more refined argument types than any declaration
found above. */
struct c_binding *b;
for (b = I_SYMBOL_BINDING (DECL_NAME (decl1)); b; b = b->shadowed)
if (B_IN_SCOPE (b, external_scope))
break;
if (b)
{
tree ext_decl, ext_type;
ext_decl = b->decl;
ext_type = b->u.type ? b->u.type : TREE_TYPE (ext_decl);
if (TREE_CODE (ext_type) == FUNCTION_TYPE
&& comptypes (TREE_TYPE (TREE_TYPE (decl1)),
TREE_TYPE (ext_type)))
{
current_function_prototype_locus
= DECL_SOURCE_LOCATION (ext_decl);
current_function_prototype_built_in
= C_DECL_BUILTIN_PROTOTYPE (ext_decl);
current_function_prototype_arg_types
= TYPE_ARG_TYPES (ext_type);
}
}
}
}
/* Optionally warn of old-fashioned def with no previous prototype. */
if (warn_strict_prototypes
&& old_decl != error_mark_node
&& !prototype_p (TREE_TYPE (decl1))
&& C_DECL_ISNT_PROTOTYPE (old_decl))
warning_at (loc, OPT_Wstrict_prototypes,
"function declaration isn%'t a prototype");
/* Optionally warn of any global def with no previous prototype. */
else if (warn_missing_prototypes
&& old_decl != error_mark_node
&& TREE_PUBLIC (decl1)
&& !MAIN_NAME_P (DECL_NAME (decl1))
&& C_DECL_ISNT_PROTOTYPE (old_decl)
&& !DECL_DECLARED_INLINE_P (decl1))
warning_at (loc, OPT_Wmissing_prototypes,
"no previous prototype for %qD", decl1);
/* Optionally warn of any def with no previous prototype
if the function has already been used. */
else if (warn_missing_prototypes
&& old_decl != NULL_TREE
&& old_decl != error_mark_node
&& TREE_USED (old_decl)
&& !prototype_p (TREE_TYPE (old_decl)))
warning_at (loc, OPT_Wmissing_prototypes,
"%qD was used with no prototype before its definition", decl1);
/* Optionally warn of any global def with no previous declaration. */
else if (warn_missing_declarations
&& TREE_PUBLIC (decl1)
&& old_decl == NULL_TREE
&& !MAIN_NAME_P (DECL_NAME (decl1))
&& !DECL_DECLARED_INLINE_P (decl1))
warning_at (loc, OPT_Wmissing_declarations,
"no previous declaration for %qD",
decl1);
/* Optionally warn of any def with no previous declaration
if the function has already been used. */
else if (warn_missing_declarations
&& old_decl != NULL_TREE
&& old_decl != error_mark_node
&& TREE_USED (old_decl)
&& C_DECL_IMPLICIT (old_decl))
warning_at (loc, OPT_Wmissing_declarations,
"%qD was used with no declaration before its definition", decl1);
/* This function exists in static storage.
(This does not mean `static' in the C sense!) */
TREE_STATIC (decl1) = 1;
/* This is the earliest point at which we might know the assembler
name of the function. Thus, if it's set before this, die horribly. */
gcc_assert (!DECL_ASSEMBLER_NAME_SET_P (decl1));
/* If #pragma weak was used, mark the decl weak now. */
if (current_scope == file_scope)
maybe_apply_pragma_weak (decl1);
/* Warn for unlikely, improbable, or stupid declarations of `main'. */
if (warn_main && MAIN_NAME_P (DECL_NAME (decl1)))
{
if (TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (decl1)))
!= integer_type_node)
pedwarn (loc, OPT_Wmain, "return type of %qD is not %<int%>", decl1);
else if (TYPE_ATOMIC (TREE_TYPE (TREE_TYPE (decl1))))
pedwarn (loc, OPT_Wmain, "%<_Atomic%>-qualified return type of %qD",
decl1);
check_main_parameter_types (decl1);
if (!TREE_PUBLIC (decl1))
pedwarn (loc, OPT_Wmain,
"%qD is normally a non-static function", decl1);
}
/* Record the decl so that the function name is defined.
If we already have a decl for this name, and it is a FUNCTION_DECL,
use the old decl. */
current_function_decl = pushdecl (decl1);
push_scope ();
declare_parm_level ();
restype = TREE_TYPE (TREE_TYPE (current_function_decl));
resdecl = build_decl (loc, RESULT_DECL, NULL_TREE, restype);
DECL_ARTIFICIAL (resdecl) = 1;
DECL_IGNORED_P (resdecl) = 1;
DECL_RESULT (current_function_decl) = resdecl;
start_fname_decls ();
return true;
}
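/* Sketches of definitions that trip the -Wmain checks above:

	float main (void) { ... }	-> "return type of main is not int"
	static int main (void) { ... }	-> "main is normally a non-static
					   function"
	_Atomic int main (void) { ... }	-> "_Atomic-qualified return type
					   of main"  */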
/* Subroutine of store_parm_decls which handles new-style function
definitions (prototype format). The parms already have decls, so we
need only record them as in effect and complain if any redundant
old-style parm decls were written. */
static void
store_parm_decls_newstyle (tree fndecl, const struct c_arg_info *arg_info)
{
tree decl;
c_arg_tag *tag;
unsigned ix;
if (current_scope->bindings)
{
error_at (DECL_SOURCE_LOCATION (fndecl),
"old-style parameter declarations in prototyped "
"function definition");
/* Get rid of the old-style declarations. */
pop_scope ();
push_scope ();
}
/* Don't issue this warning for nested functions, and don't issue this
warning if we got here because ARG_INFO_TYPES was error_mark_node
(this happens when a function definition has just an ellipsis in
its parameter list). */
else if (!in_system_header_at (input_location)
&& !current_function_scope
&& arg_info->types != error_mark_node)
warning_at (DECL_SOURCE_LOCATION (fndecl), OPT_Wtraditional,
"traditional C rejects ISO C style function definitions");
/* Now make all the parameter declarations visible in the function body.
We can bypass most of the grunt work of pushdecl. */
for (decl = arg_info->parms; decl; decl = DECL_CHAIN (decl))
{
DECL_CONTEXT (decl) = current_function_decl;
if (DECL_NAME (decl))
{
bind (DECL_NAME (decl), decl, current_scope,
/*invisible=*/false, /*nested=*/false,
UNKNOWN_LOCATION);
if (!TREE_USED (decl))
warn_if_shadowing (decl);
}
else
error_at (DECL_SOURCE_LOCATION (decl), "parameter name omitted");
}
/* Record the parameter list in the function declaration. */
DECL_ARGUMENTS (fndecl) = arg_info->parms;
/* Now make all the ancillary declarations visible, likewise. */
for (decl = arg_info->others; decl; decl = DECL_CHAIN (decl))
{
DECL_CONTEXT (decl) = current_function_decl;
if (DECL_NAME (decl))
bind (DECL_NAME (decl), decl, current_scope,
/*invisible=*/false,
/*nested=*/(TREE_CODE (decl) == FUNCTION_DECL),
UNKNOWN_LOCATION);
}
/* And all the tag declarations. */
FOR_EACH_VEC_SAFE_ELT_REVERSE (arg_info->tags, ix, tag)
if (tag->id)
bind (tag->id, tag->type, current_scope,
/*invisible=*/false, /*nested=*/false, UNKNOWN_LOCATION);
}
/* Subroutine of store_parm_decls which handles old-style function
definitions (separate parameter list and declarations). */
static void
store_parm_decls_oldstyle (tree fndecl, const struct c_arg_info *arg_info)
{
struct c_binding *b;
tree parm, decl, last;
tree parmids = arg_info->parms;
hash_set<tree> seen_args;
if (!in_system_header_at (input_location))
warning_at (DECL_SOURCE_LOCATION (fndecl),
OPT_Wold_style_definition, "old-style function definition");
/* Match each formal parameter name with its declaration. Save each
decl in the appropriate TREE_PURPOSE slot of the parmids chain. */
for (parm = parmids; parm; parm = TREE_CHAIN (parm))
{
if (TREE_VALUE (parm) == NULL_TREE)
{
error_at (DECL_SOURCE_LOCATION (fndecl),
"parameter name missing from parameter list");
TREE_PURPOSE (parm) = NULL_TREE;
continue;
}
b = I_SYMBOL_BINDING (TREE_VALUE (parm));
if (b && B_IN_CURRENT_SCOPE (b))
{
decl = b->decl;
/* Skip erroneous parameters. */
if (decl == error_mark_node)
continue;
/* If we got something other than a PARM_DECL it is an error. */
if (TREE_CODE (decl) != PARM_DECL)
{
error_at (DECL_SOURCE_LOCATION (decl),
"%qD declared as a non-parameter", decl);
continue;
}
/* If the declaration is already marked, we have a duplicate
name. Complain and ignore the duplicate. */
else if (seen_args.contains (decl))
{
error_at (DECL_SOURCE_LOCATION (decl),
"multiple parameters named %qD", decl);
TREE_PURPOSE (parm) = NULL_TREE;
continue;
}
/* If the declaration says "void", complain and turn it into
an int. */
else if (VOID_TYPE_P (TREE_TYPE (decl)))
{
error_at (DECL_SOURCE_LOCATION (decl),
"parameter %qD declared with void type", decl);
TREE_TYPE (decl) = integer_type_node;
DECL_ARG_TYPE (decl) = integer_type_node;
layout_decl (decl, 0);
}
warn_if_shadowing (decl);
}
/* If no declaration found, default to int. */
else
{
/* FIXME diagnostics: This should be the location of the argument,
not the FNDECL. E.g., for an old-style declaration
int f10(v) { blah; }
We should use the location of the V, not the F10.
Unfortunately, the V is an IDENTIFIER_NODE which has no
location. In the future we need locations for c_arg_info
entries.
See gcc.dg/Wshadow-3.c for an example of this problem. */
decl = build_decl (DECL_SOURCE_LOCATION (fndecl),
PARM_DECL, TREE_VALUE (parm), integer_type_node);
DECL_ARG_TYPE (decl) = TREE_TYPE (decl);
pushdecl (decl);
warn_if_shadowing (decl);
if (flag_isoc99)
pedwarn (DECL_SOURCE_LOCATION (decl),
OPT_Wimplicit_int, "type of %qD defaults to %<int%>",
decl);
else
warning_at (DECL_SOURCE_LOCATION (decl),
OPT_Wmissing_parameter_type,
"type of %qD defaults to %<int%>", decl);
}
TREE_PURPOSE (parm) = decl;
seen_args.add (decl);
}
/* Now examine the parms chain for incomplete declarations
and declarations with no corresponding names. */
for (b = current_scope->bindings; b; b = b->prev)
{
parm = b->decl;
if (TREE_CODE (parm) != PARM_DECL)
continue;
if (TREE_TYPE (parm) != error_mark_node
&& !COMPLETE_TYPE_P (TREE_TYPE (parm)))
{
error_at (DECL_SOURCE_LOCATION (parm),
"parameter %qD has incomplete type", parm);
TREE_TYPE (parm) = error_mark_node;
}
if (!seen_args.contains (parm))
{
error_at (DECL_SOURCE_LOCATION (parm),
"declaration for parameter %qD but no such parameter",
parm);
/* Pretend the parameter was not missing.
This gets us to a standard state and minimizes
further error messages. */
parmids = chainon (parmids, tree_cons (parm, 0, 0));
}
}
/* Chain the declarations together in the order of the list of
names. Store that chain in the function decl, replacing the
list of names. Update the current scope to match. */
DECL_ARGUMENTS (fndecl) = NULL_TREE;
for (parm = parmids; parm; parm = TREE_CHAIN (parm))
if (TREE_PURPOSE (parm))
break;
if (parm && TREE_PURPOSE (parm))
{
last = TREE_PURPOSE (parm);
DECL_ARGUMENTS (fndecl) = last;
for (parm = TREE_CHAIN (parm); parm; parm = TREE_CHAIN (parm))
if (TREE_PURPOSE (parm))
{
DECL_CHAIN (last) = TREE_PURPOSE (parm);
last = TREE_PURPOSE (parm);
}
DECL_CHAIN (last) = NULL_TREE;
}
/* If there was a previous prototype,
set the DECL_ARG_TYPE of each argument according to
the type previously specified, and report any mismatches. */
if (current_function_prototype_arg_types)
{
tree type;
for (parm = DECL_ARGUMENTS (fndecl),
type = current_function_prototype_arg_types;
parm || (type != NULL_TREE
&& TREE_VALUE (type) != error_mark_node
&& TYPE_MAIN_VARIANT (TREE_VALUE (type)) != void_type_node);
parm = DECL_CHAIN (parm), type = TREE_CHAIN (type))
{
if (parm == NULL_TREE
|| type == NULL_TREE
|| (TREE_VALUE (type) != error_mark_node
&& TYPE_MAIN_VARIANT (TREE_VALUE (type)) == void_type_node))
{
if (current_function_prototype_built_in)
warning_at (DECL_SOURCE_LOCATION (fndecl),
0, "number of arguments doesn%'t match "
"built-in prototype");
else
{
/* FIXME diagnostics: This should be the location of
FNDECL, but there is bug when a prototype is
declared inside function context, but defined
outside of it (e.g., gcc.dg/pr15698-2.c). In
which case FNDECL gets the location of the
prototype, not the definition. */
error_at (input_location,
"number of arguments doesn%'t match prototype");
error_at (current_function_prototype_locus,
"prototype declaration");
}
break;
}
/* Type for passing arg must be consistent with that
declared for the arg. ISO C says we take the unqualified
type for parameters declared with qualified type. */
if (TREE_TYPE (parm) != error_mark_node
&& TREE_VALUE (type) != error_mark_node
&& ((TYPE_ATOMIC (DECL_ARG_TYPE (parm))
!= TYPE_ATOMIC (TREE_VALUE (type)))
|| !comptypes (TYPE_MAIN_VARIANT (DECL_ARG_TYPE (parm)),
TYPE_MAIN_VARIANT (TREE_VALUE (type)))))
{
if ((TYPE_ATOMIC (DECL_ARG_TYPE (parm))
== TYPE_ATOMIC (TREE_VALUE (type)))
&& (TYPE_MAIN_VARIANT (TREE_TYPE (parm))
== TYPE_MAIN_VARIANT (TREE_VALUE (type))))
{
/* Adjust argument to match prototype. E.g. a previous
`int foo(float);' prototype causes
`int foo(x) float x; {...}' to be treated like
`int foo(float x) {...}'. This is particularly
useful for argument types like uid_t. */
DECL_ARG_TYPE (parm) = TREE_TYPE (parm);
if (targetm.calls.promote_prototypes (TREE_TYPE (current_function_decl))
&& INTEGRAL_TYPE_P (TREE_TYPE (parm))
&& (TYPE_PRECISION (TREE_TYPE (parm))
< TYPE_PRECISION (integer_type_node)))
DECL_ARG_TYPE (parm)
= c_type_promotes_to (TREE_TYPE (parm));
/* ??? Is it possible to get here with a
built-in prototype or will it always have
been diagnosed as conflicting with an
old-style definition and discarded? */
if (current_function_prototype_built_in)
warning_at (DECL_SOURCE_LOCATION (parm),
OPT_Wpedantic, "promoted argument %qD "
"doesn%'t match built-in prototype", parm);
else
{
pedwarn (DECL_SOURCE_LOCATION (parm),
OPT_Wpedantic, "promoted argument %qD "
"doesn%'t match prototype", parm);
pedwarn (current_function_prototype_locus, OPT_Wpedantic,
"prototype declaration");
}
}
else
{
if (current_function_prototype_built_in)
warning_at (DECL_SOURCE_LOCATION (parm),
0, "argument %qD doesn%'t match "
"built-in prototype", parm);
else
{
error_at (DECL_SOURCE_LOCATION (parm),
"argument %qD doesn%'t match prototype", parm);
error_at (current_function_prototype_locus,
"prototype declaration");
}
}
}
}
TYPE_ACTUAL_ARG_TYPES (TREE_TYPE (fndecl)) = NULL_TREE;
}
/* Otherwise, create a prototype that would match. */
else
{
tree actual = NULL_TREE, last = NULL_TREE, type;
for (parm = DECL_ARGUMENTS (fndecl); parm; parm = DECL_CHAIN (parm))
{
type = tree_cons (NULL_TREE, DECL_ARG_TYPE (parm), NULL_TREE);
if (last)
TREE_CHAIN (last) = type;
else
actual = type;
last = type;
}
type = tree_cons (NULL_TREE, void_type_node, NULL_TREE);
if (last)
TREE_CHAIN (last) = type;
else
actual = type;
/* We are going to assign a new value for the TYPE_ACTUAL_ARG_TYPES
of the type of this function, but we need to avoid having this
affect the types of other similarly-typed functions, so we must
first force the generation of an identical (but separate) type
node for the relevant function type. The new node we create
will be a variant of the main variant of the original function
type. */
TREE_TYPE (fndecl) = build_variant_type_copy (TREE_TYPE (fndecl));
TYPE_ACTUAL_ARG_TYPES (TREE_TYPE (fndecl)) = actual;
}
}
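/* Sketch of the prototype interaction handled above, expanding the
   example in the comments:

	int foo (float);			-> earlier prototype
	int foo (x) float x; { return 0; }	-> old-style definition

   X is parsed with type float but would be passed as double under the
   default argument promotions; since TREE_TYPE (parm) matches the
   prototype, DECL_ARG_TYPE is adjusted back to float and only a
   -Wpedantic note about the promoted argument is issued.  With no
   visible prototype at all, a matching one is synthesized and stored
   in TYPE_ACTUAL_ARG_TYPES.  */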
/* Store parameter declarations passed in ARG_INFO into the current
function declaration. */
void
store_parm_decls_from (struct c_arg_info *arg_info)
{
current_function_arg_info = arg_info;
store_parm_decls ();
}
/* Called by walk_tree to look for and update context-less labels. */
static tree
set_labels_context_r (tree *tp, int *walk_subtrees, void *data)
{
if (TREE_CODE (*tp) == LABEL_EXPR
&& DECL_CONTEXT (LABEL_EXPR_LABEL (*tp)) == NULL_TREE)
{
DECL_CONTEXT (LABEL_EXPR_LABEL (*tp)) = static_cast<tree>(data);
*walk_subtrees = 0;
}
return NULL_TREE;
}
/* Store the parameter declarations into the current function declaration.
This is called after parsing the parameter declarations, before
digesting the body of the function.
For an old-style definition, construct a prototype out of the old-style
parameter declarations and inject it into the function's type. */
void
store_parm_decls (void)
{
tree fndecl = current_function_decl;
bool proto;
/* The argument information block for FNDECL. */
struct c_arg_info *arg_info = current_function_arg_info;
current_function_arg_info = 0;
/* True if this definition is written with a prototype. Note:
despite C99 6.7.5.3p14, we can *not* treat an empty argument
list in a function definition as equivalent to (void) -- an
empty argument list specifies the function has no parameters,
but only (void) sets up a prototype for future calls. */
proto = arg_info->types != 0;
if (proto)
store_parm_decls_newstyle (fndecl, arg_info);
else
store_parm_decls_oldstyle (fndecl, arg_info);
/* The next call to push_scope will be a function body. */
next_is_function_body = true;
/* Write a record describing this function definition to the prototypes
file (if requested). */
gen_aux_info_record (fndecl, 1, 0, proto);
/* Initialize the RTL code for the function. */
allocate_struct_function (fndecl, false);
if (warn_unused_local_typedefs)
cfun->language = ggc_cleared_alloc<language_function> ();
/* Begin the statement tree for this function. */
DECL_SAVED_TREE (fndecl) = push_stmt_list ();
/* ??? Insert the contents of the pending sizes list into the function
to be evaluated. The only reason left to have this is
void foo(int n, int array[n++])
because we throw away the array type in favor of a pointer type, and
thus won't naturally see the SAVE_EXPR containing the increment. All
other pending sizes would be handled by gimplify_parameters. */
if (arg_info->pending_sizes)
{
/* In very special circumstances, e.g. for code like
_Atomic int i = 5;
void f (int a[i += 2]) {}
we need to execute the atomic assignment on function entry.
But in this case, it is not just a straight store, it has the
op= form, which means that build_atomic_assign has generated
gotos, labels, etc. Because at that time the function decl
for F has not been created yet, those labels do not have any
function context. But we have the fndecl now, so update the
labels accordingly. gimplify_expr would crash otherwise. */
walk_tree_without_duplicates (&arg_info->pending_sizes,
set_labels_context_r, fndecl);
add_stmt (arg_info->pending_sizes);
}
}
/* Store PARM_DECLs in PARMS into scope temporarily. Used for
c_finish_omp_declare_simd for function prototypes. No diagnostics
should be done. */
void
temp_store_parm_decls (tree fndecl, tree parms)
{
push_scope ();
for (tree p = parms; p; p = DECL_CHAIN (p))
{
DECL_CONTEXT (p) = fndecl;
if (DECL_NAME (p))
bind (DECL_NAME (p), p, current_scope,
/*invisible=*/false, /*nested=*/false,
UNKNOWN_LOCATION);
}
}
/* Undo what temp_store_parm_decls did. */
void
temp_pop_parm_decls (void)
{
/* Clear all bindings in this temporary scope, so that
pop_scope doesn't create a BLOCK. */
struct c_binding *b = current_scope->bindings;
current_scope->bindings = NULL;
for (; b; b = free_binding_and_advance (b))
{
gcc_assert (TREE_CODE (b->decl) == PARM_DECL
|| b->decl == error_mark_node);
gcc_assert (I_SYMBOL_BINDING (b->id) == b);
I_SYMBOL_BINDING (b->id) = b->shadowed;
if (b->shadowed && b->shadowed->u.type)
TREE_TYPE (b->shadowed->decl) = b->shadowed->u.type;
}
pop_scope ();
}
/* Finish up a function declaration and compile that function
all the way to assembler language output. Then free the storage
for the function definition.
This is called after parsing the body of the function definition. */
void
finish_function (void)
{
tree fndecl = current_function_decl;
if (c_dialect_objc ())
objc_finish_function ();
if (TREE_CODE (fndecl) == FUNCTION_DECL
&& targetm.calls.promote_prototypes (TREE_TYPE (fndecl)))
{
tree args = DECL_ARGUMENTS (fndecl);
for (; args; args = DECL_CHAIN (args))
{
tree type = TREE_TYPE (args);
if (INTEGRAL_TYPE_P (type)
&& TYPE_PRECISION (type) < TYPE_PRECISION (integer_type_node))
DECL_ARG_TYPE (args) = c_type_promotes_to (type);
}
}
if (DECL_INITIAL (fndecl) && DECL_INITIAL (fndecl) != error_mark_node)
BLOCK_SUPERCONTEXT (DECL_INITIAL (fndecl)) = fndecl;
/* Must mark the RESULT_DECL as being in this function. */
if (DECL_RESULT (fndecl) && DECL_RESULT (fndecl) != error_mark_node)
DECL_CONTEXT (DECL_RESULT (fndecl)) = fndecl;
if (MAIN_NAME_P (DECL_NAME (fndecl)) && flag_hosted
&& TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (fndecl)))
== integer_type_node && flag_isoc99)
{
/* Hack. We don't want the middle-end to warn that this return
is unreachable, so we mark its location as special. Using
UNKNOWN_LOCATION has the problem that it gets clobbered in
annotate_one_with_locus. A cleaner solution might be to
ensure ! should_carry_locus_p (stmt), but that needs a flag.
*/
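/* For illustration: in a C99 hosted environment,
     int main (void) { }
   behaves as if it ended with "return 0;", which is the return
   statement injected just below.  */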
c_finish_return (BUILTINS_LOCATION, integer_zero_node, NULL_TREE);
}
/* Tie off the statement tree for this function. */
DECL_SAVED_TREE (fndecl) = pop_stmt_list (DECL_SAVED_TREE (fndecl));
finish_fname_decls ();
/* Complain if there's just no return statement. */
if (warn_return_type
&& TREE_CODE (TREE_TYPE (TREE_TYPE (fndecl))) != VOID_TYPE
&& !current_function_returns_value && !current_function_returns_null
/* Don't complain if we are no-return. */
&& !current_function_returns_abnormally
/* Don't complain if we are declared noreturn. */
&& !TREE_THIS_VOLATILE (fndecl)
/* Don't warn for main(). */
&& !MAIN_NAME_P (DECL_NAME (fndecl))
/* Or if they didn't actually specify a return type. */
&& !C_FUNCTION_IMPLICIT_INT (fndecl)
/* Normally, with -Wreturn-type, flow will complain, but we might
optimize out static functions. */
&& !TREE_PUBLIC (fndecl))
{
warning (OPT_Wreturn_type,
"no return statement in function returning non-void");
TREE_NO_WARNING (fndecl) = 1;
}
/* Complain about parameters that are only set, but never otherwise used. */
if (warn_unused_but_set_parameter)
{
tree decl;
for (decl = DECL_ARGUMENTS (fndecl);
decl;
decl = DECL_CHAIN (decl))
if (TREE_USED (decl)
&& TREE_CODE (decl) == PARM_DECL
&& !DECL_READ_P (decl)
&& DECL_NAME (decl)
&& !DECL_ARTIFICIAL (decl)
&& !TREE_NO_WARNING (decl))
warning_at (DECL_SOURCE_LOCATION (decl),
OPT_Wunused_but_set_parameter,
"parameter %qD set but not used", decl);
}
/* Complain about locally defined typedefs that are not used in this
function. */
maybe_warn_unused_local_typedefs ();
/* Possibly warn about unused parameters. */
if (warn_unused_parameter)
do_warn_unused_parameter (fndecl);
/* Store the end of the function, so that we get good line number
info for the epilogue. */
cfun->function_end_locus = input_location;
/* Finalize the ELF visibility for the function. */
c_determine_visibility (fndecl);
/* For GNU C extern inline functions disregard inline limits. */
if (DECL_EXTERNAL (fndecl)
&& DECL_DECLARED_INLINE_P (fndecl)
&& (flag_gnu89_inline
|| lookup_attribute ("gnu_inline", DECL_ATTRIBUTES (fndecl))))
DECL_DISREGARD_INLINE_LIMITS (fndecl) = 1;
/* Genericize before inlining. Delay genericizing nested functions
until their parent function is genericized. Since finalizing
requires GENERIC, delay that as well. */
if (DECL_INITIAL (fndecl) && DECL_INITIAL (fndecl) != error_mark_node
&& !undef_nested_function)
{
if (!decl_function_context (fndecl))
{
invoke_plugin_callbacks (PLUGIN_PRE_GENERICIZE, fndecl);
c_genericize (fndecl);
/* ??? Objc emits functions after finalizing the compilation unit.
This should be cleaned up later and this conditional removed. */
if (symtab->global_info_ready)
{
cgraph_node::add_new_function (fndecl, false);
return;
}
cgraph_node::finalize_function (fndecl, false);
}
else
{
/* Register this function with cgraph just far enough to get it
added to our parent's nested function list. Handy, since the
C front end doesn't have such a list. */
(void) cgraph_node::get_create (fndecl);
}
}
if (!decl_function_context (fndecl))
undef_nested_function = false;
if (cfun->language != NULL)
{
ggc_free (cfun->language);
cfun->language = NULL;
}
/* We're leaving the context of this function, so zap cfun.
It's still in DECL_STRUCT_FUNCTION, and we'll restore it in
tree_rest_of_compilation. */
set_cfun (NULL);
invoke_plugin_callbacks (PLUGIN_FINISH_PARSE_FUNCTION, current_function_decl);
current_function_decl = NULL;
}
/* Check the declarations given in a for-loop for satisfying the C99
constraints. If exactly one such decl is found, return it. LOC is
the location of the opening parenthesis of the for loop. The last
parameter allows you to control the "for loop initial declarations
are only allowed in C99 mode" error. Normally, you should pass
flag_isoc99 as that parameter. But in some cases (Objective-C
foreach loop, for example) we want to run the checks in this
function even if not in C99 mode, so we allow the caller to turn
off the error about not being in C99 mode.
*/
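/* For example, with -std=c90,
     for (int i = 0; i < n; i++) ...
   is rejected here, while -std=c99 and later accept it.  */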
tree
check_for_loop_decls (location_t loc, bool turn_off_iso_c99_error)
{
struct c_binding *b;
tree one_decl = NULL_TREE;
int n_decls = 0;
if (!turn_off_iso_c99_error)
{
static bool hint = true;
/* If we get here, declarations have been used in a for loop without
the C99 for loop scope. This doesn't make much sense, so don't
allow it. */
error_at (loc, "%<for%> loop initial declarations "
"are only allowed in C99 or C11 mode");
if (hint)
{
inform (loc,
"use option -std=c99, -std=gnu99, -std=c11 or -std=gnu11 "
"to compile your code");
hint = false;
}
return NULL_TREE;
}
/* C99 subclause 6.8.5 paragraph 3:
[#3] The declaration part of a for statement shall only
declare identifiers for objects having storage class auto or
register.
It isn't clear whether, in this sentence, "identifiers" binds to
"shall only declare" or to "objects" - that is, whether all identifiers
declared must be identifiers for objects, or whether the restriction
only applies to those that are. (A question on this in comp.std.c
in November 2000 received no answer.) We implement the strictest
interpretation, to avoid creating an extension which later causes
problems. */
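/* For illustration, the loop below diagnoses both
     for (static int i = 0; i < n; i++) ...       -- bad storage class
     for (struct s { int x; } v = { 0 }; ;) ...   -- declares a tag
   while plain "for (int i = 0; ...)" is accepted.  */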
for (b = current_scope->bindings; b; b = b->prev)
{
tree id = b->id;
tree decl = b->decl;
if (!id)
continue;
switch (TREE_CODE (decl))
{
case VAR_DECL:
{
location_t decl_loc = DECL_SOURCE_LOCATION (decl);
if (TREE_STATIC (decl))
error_at (decl_loc,
"declaration of static variable %qD in %<for%> loop "
"initial declaration", decl);
else if (DECL_EXTERNAL (decl))
error_at (decl_loc,
"declaration of %<extern%> variable %qD in %<for%> loop "
"initial declaration", decl);
}
break;
case RECORD_TYPE:
error_at (loc,
"%<struct %E%> declared in %<for%> loop initial "
"declaration", id);
break;
case UNION_TYPE:
error_at (loc,
"%<union %E%> declared in %<for%> loop initial declaration",
id);
break;
case ENUMERAL_TYPE:
error_at (loc, "%<enum %E%> declared in %<for%> loop "
"initial declaration", id);
break;
default:
error_at (loc, "declaration of non-variable "
"%qD in %<for%> loop initial declaration", decl);
}
n_decls++;
one_decl = decl;
}
return n_decls == 1 ? one_decl : NULL_TREE;
}
/* Save and reinitialize the variables
used during compilation of a C function. */
void
c_push_function_context (void)
{
struct language_function *p = cfun->language;
/* cfun->language might have been already allocated by the use of
-Wunused-local-typedefs. In that case, just re-use it. */
if (p == NULL)
cfun->language = p = ggc_cleared_alloc<language_function> ();
p->base.x_stmt_tree = c_stmt_tree;
c_stmt_tree.x_cur_stmt_list = vec_safe_copy (c_stmt_tree.x_cur_stmt_list);
p->x_break_label = c_break_label;
p->x_cont_label = c_cont_label;
p->x_switch_stack = c_switch_stack;
p->arg_info = current_function_arg_info;
p->returns_value = current_function_returns_value;
p->returns_null = current_function_returns_null;
p->returns_abnormally = current_function_returns_abnormally;
p->warn_about_return_type = warn_about_return_type;
push_function_context ();
}
/* Restore the variables used during compilation of a C function. */
void
c_pop_function_context (void)
{
struct language_function *p;
pop_function_context ();
p = cfun->language;
/* When -Wunused-local-typedefs is in effect, cfun->language is
used to store data throughout the lifetime of the current cfun,
so don't deallocate it. */
if (!warn_unused_local_typedefs)
cfun->language = NULL;
if (DECL_STRUCT_FUNCTION (current_function_decl) == 0
&& DECL_SAVED_TREE (current_function_decl) == NULL_TREE)
{
/* Stop pointing to the local nodes about to be freed. */
/* But DECL_INITIAL must remain nonzero so we know this
was an actual function definition. */
DECL_INITIAL (current_function_decl) = error_mark_node;
DECL_ARGUMENTS (current_function_decl) = NULL_TREE;
}
c_stmt_tree = p->base.x_stmt_tree;
p->base.x_stmt_tree.x_cur_stmt_list = NULL;
c_break_label = p->x_break_label;
c_cont_label = p->x_cont_label;
c_switch_stack = p->x_switch_stack;
current_function_arg_info = p->arg_info;
current_function_returns_value = p->returns_value;
current_function_returns_null = p->returns_null;
current_function_returns_abnormally = p->returns_abnormally;
warn_about_return_type = p->warn_about_return_type;
}
/* The functions below are required to support processing a function
all at once in the C front end. Currently these functions are not
called from anywhere in the C front end, but as these changes
continue, that will change. */
/* Returns the stmt_tree (if any) to which statements are currently
being added. If there is no active statement-tree, NULL is
returned. */
stmt_tree
current_stmt_tree (void)
{
return &c_stmt_tree;
}
/* Return the global value of T as a symbol. */
tree
identifier_global_value (tree t)
{
struct c_binding *b;
for (b = I_SYMBOL_BINDING (t); b; b = b->shadowed)
if (B_IN_FILE_SCOPE (b) || B_IN_EXTERNAL_SCOPE (b))
return b->decl;
return NULL_TREE;
}
/* In C, the only C-linkage public declaration is at file scope. */
tree
c_linkage_bindings (tree name)
{
return identifier_global_value (name);
}
/* Record a builtin type for C. If NAME is non-NULL, it is the name used;
otherwise the name is found in ridpointers from RID_INDEX. */
void
record_builtin_type (enum rid rid_index, const char *name, tree type)
{
tree id, decl;
if (name == 0)
id = ridpointers[(int) rid_index];
else
id = get_identifier (name);
decl = build_decl (UNKNOWN_LOCATION, TYPE_DECL, id, type);
pushdecl (decl);
if (debug_hooks->type_decl)
debug_hooks->type_decl (decl, false);
}
/* Build the void_list_node (void_type_node having been created). */
tree
build_void_list_node (void)
{
tree t = build_tree_list (NULL_TREE, void_type_node);
return t;
}
/* Return a c_parm structure with the given SPECS, ATTRS and DECLARATOR. */
struct c_parm *
build_c_parm (struct c_declspecs *specs, tree attrs,
struct c_declarator *declarator,
location_t loc)
{
struct c_parm *ret = XOBNEW (&parser_obstack, struct c_parm);
ret->specs = specs;
ret->attrs = attrs;
ret->declarator = declarator;
ret->loc = loc;
return ret;
}
/* Return a declarator with nested attributes. TARGET is the inner
declarator to which these attributes apply. ATTRS are the
attributes. */
struct c_declarator *
build_attrs_declarator (tree attrs, struct c_declarator *target)
{
struct c_declarator *ret = XOBNEW (&parser_obstack, struct c_declarator);
ret->kind = cdk_attrs;
ret->declarator = target;
ret->u.attrs = attrs;
return ret;
}
/* Return a declarator for a function with arguments specified by ARGS
and return type specified by TARGET. */
struct c_declarator *
build_function_declarator (struct c_arg_info *args,
struct c_declarator *target)
{
struct c_declarator *ret = XOBNEW (&parser_obstack, struct c_declarator);
ret->kind = cdk_function;
ret->declarator = target;
ret->u.arg_info = args;
return ret;
}
/* Return a declarator for the identifier IDENT (which may be
NULL_TREE for an abstract declarator). */
struct c_declarator *
build_id_declarator (tree ident)
{
struct c_declarator *ret = XOBNEW (&parser_obstack, struct c_declarator);
ret->kind = cdk_id;
ret->declarator = 0;
ret->u.id = ident;
/* Default value - may get reset to a more precise location. */
ret->id_loc = input_location;
return ret;
}
/* Return something to represent absolute declarators containing a *.
TARGET is the absolute declarator that the * contains.
TYPE_QUALS_ATTRS is a structure for type qualifiers and attributes
to apply to the pointer type. */
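/* For example, in "int *const p", TARGET is the declarator for "p"
   and TYPE_QUALS_ATTRS carries "const", so the resulting cdk_pointer
   declarator has TYPE_QUAL_CONST in its u.pointer_quals.  */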
struct c_declarator *
make_pointer_declarator (struct c_declspecs *type_quals_attrs,
struct c_declarator *target)
{
tree attrs;
int quals = 0;
struct c_declarator *itarget = target;
struct c_declarator *ret = XOBNEW (&parser_obstack, struct c_declarator);
if (type_quals_attrs)
{
attrs = type_quals_attrs->attrs;
quals = quals_from_declspecs (type_quals_attrs);
if (attrs != NULL_TREE)
itarget = build_attrs_declarator (attrs, target);
}
ret->kind = cdk_pointer;
ret->declarator = itarget;
ret->u.pointer_quals = quals;
return ret;
}
/* Return a pointer to a structure for an empty list of declaration
specifiers. */
struct c_declspecs *
build_null_declspecs (void)
{
struct c_declspecs *ret = XOBNEW (&parser_obstack, struct c_declspecs);
memset (ret, 0, sizeof *ret);
ret->align_log = -1;
ret->typespec_word = cts_none;
ret->storage_class = csc_none;
ret->expr_const_operands = true;
ret->typespec_kind = ctsk_none;
ret->address_space = ADDR_SPACE_GENERIC;
return ret;
}
/* Add the address space ADDRSPACE to the declaration specifiers
SPECS, returning SPECS. */
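/* For example, on a target with named address spaces (e.g. AVR),
   the "__flash" qualifier in
     const __flash char c = 'x';
   is recorded through this function.  */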
struct c_declspecs *
declspecs_add_addrspace (source_location location,
struct c_declspecs *specs, addr_space_t as)
{
specs->non_sc_seen_p = true;
specs->declspecs_seen_p = true;
if (!ADDR_SPACE_GENERIC_P (specs->address_space)
&& specs->address_space != as)
error ("incompatible address space qualifiers %qs and %qs",
c_addr_space_name (as),
c_addr_space_name (specs->address_space));
else
{
specs->address_space = as;
specs->locations[cdw_address_space] = location;
}
return specs;
}
/* Add the type qualifier QUAL to the declaration specifiers SPECS,
returning SPECS. */
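/* For example, "const const int x;" reaches the duplicate handling
   below -- a pedwarn for C90 (which disallowed duplicate qualifiers)
   and otherwise -Wduplicate-decl-specifier, provided neither
   occurrence comes from a macro expansion.  */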
struct c_declspecs *
declspecs_add_qual (source_location loc,
struct c_declspecs *specs, tree qual)
{
enum rid i;
bool dupe = false;
specs->non_sc_seen_p = true;
specs->declspecs_seen_p = true;
gcc_assert (TREE_CODE (qual) == IDENTIFIER_NODE
&& C_IS_RESERVED_WORD (qual));
i = C_RID_CODE (qual);
location_t prev_loc = UNKNOWN_LOCATION;
switch (i)
{
case RID_CONST:
dupe = specs->const_p;
specs->const_p = true;
prev_loc = specs->locations[cdw_const];
specs->locations[cdw_const] = loc;
break;
case RID_VOLATILE:
dupe = specs->volatile_p;
specs->volatile_p = true;
prev_loc = specs->locations[cdw_volatile];
specs->locations[cdw_volatile] = loc;
break;
case RID_RESTRICT:
dupe = specs->restrict_p;
specs->restrict_p = true;
prev_loc = specs->locations[cdw_restrict];
specs->locations[cdw_restrict] = loc;
break;
case RID_ATOMIC:
dupe = specs->atomic_p;
specs->atomic_p = true;
prev_loc = specs->locations[cdw_atomic];
specs->locations[cdw_atomic] = loc;
break;
default:
gcc_unreachable ();
}
if (dupe)
{
bool warned = pedwarn_c90 (loc, OPT_Wpedantic,
"duplicate %qE declaration specifier", qual);
if (!warned
&& warn_duplicate_decl_specifier
&& prev_loc >= RESERVED_LOCATION_COUNT
&& !from_macro_expansion_at (prev_loc)
&& !from_macro_expansion_at (loc))
warning_at (loc, OPT_Wduplicate_decl_specifier,
"duplicate %qE declaration specifier", qual);
}
return specs;
}
/* Add the type specifier TYPE to the declaration specifiers SPECS,
returning SPECS. */
struct c_declspecs *
declspecs_add_type (location_t loc, struct c_declspecs *specs,
struct c_typespec spec)
{
tree type = spec.spec;
specs->non_sc_seen_p = true;
specs->declspecs_seen_p = true;
specs->typespec_kind = spec.kind;
if (TREE_DEPRECATED (type))
specs->deprecated_p = true;
/* Handle type specifier keywords. */
if (TREE_CODE (type) == IDENTIFIER_NODE
&& C_IS_RESERVED_WORD (type)
&& C_RID_CODE (type) != RID_CXX_COMPAT_WARN)
{
enum rid i = C_RID_CODE (type);
if (specs->type)
{
error_at (loc, "two or more data types in declaration specifiers");
return specs;
}
if ((int) i <= (int) RID_LAST_MODIFIER)
{
/* "long", "short", "signed", "unsigned", "_Complex" or "_Sat". */
bool dupe = false;
switch (i)
{
case RID_LONG:
if (specs->long_long_p)
{
error_at (loc, "%<long long long%> is too long for GCC");
break;
}
if (specs->long_p)
{
if (specs->typespec_word == cts_double)
{
error_at (loc,
("both %<long long%> and %<double%> in "
"declaration specifiers"));
break;
}
pedwarn_c90 (loc, OPT_Wlong_long,
"ISO C90 does not support %<long long%>");
specs->long_long_p = 1;
specs->locations[cdw_long_long] = loc;
break;
}
if (specs->short_p)
error_at (loc,
("both %<long%> and %<short%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_auto_type)
error_at (loc,
("both %<long%> and %<__auto_type%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_void)
error_at (loc,
("both %<long%> and %<void%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_int_n)
error_at (loc,
("both %<long%> and %<__int%d%> in "
"declaration specifiers"),
int_n_data[specs->int_n_idx].bitsize);
else if (specs->typespec_word == cts_bool)
error_at (loc,
("both %<long%> and %<_Bool%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_char)
error_at (loc,
("both %<long%> and %<char%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_float)
error_at (loc,
("both %<long%> and %<float%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_floatn_nx)
error_at (loc,
("both %<long%> and %<_Float%d%s%> in "
"declaration specifiers"),
floatn_nx_types[specs->floatn_nx_idx].n,
(floatn_nx_types[specs->floatn_nx_idx].extended
? "x"
: ""));
else if (specs->typespec_word == cts_dfloat32)
error_at (loc,
("both %<long%> and %<_Decimal32%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_dfloat64)
error_at (loc,
("both %<long%> and %<_Decimal64%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_dfloat128)
error_at (loc,
("both %<long%> and %<_Decimal128%> in "
"declaration specifiers"));
else
{
specs->long_p = true;
specs->locations[cdw_long] = loc;
}
break;
case RID_SHORT:
dupe = specs->short_p;
if (specs->long_p)
error_at (loc,
("both %<long%> and %<short%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_auto_type)
error_at (loc,
("both %<short%> and %<__auto_type%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_void)
error_at (loc,
("both %<short%> and %<void%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_int_n)
error_at (loc,
("both %<short%> and %<__int%d%> in "
"declaration specifiers"),
int_n_data[specs->int_n_idx].bitsize);
else if (specs->typespec_word == cts_bool)
error_at (loc,
("both %<short%> and %<_Bool%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_char)
error_at (loc,
("both %<short%> and %<char%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_float)
error_at (loc,
("both %<short%> and %<float%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_double)
error_at (loc,
("both %<short%> and %<double%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_floatn_nx)
error_at (loc,
("both %<short%> and %<_Float%d%s%> in "
"declaration specifiers"),
floatn_nx_types[specs->floatn_nx_idx].n,
(floatn_nx_types[specs->floatn_nx_idx].extended
? "x"
: ""));
else if (specs->typespec_word == cts_dfloat32)
error_at (loc,
("both %<short%> and %<_Decimal32%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_dfloat64)
error_at (loc,
("both %<short%> and %<_Decimal64%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_dfloat128)
error_at (loc,
("both %<short%> and %<_Decimal128%> in "
"declaration specifiers"));
else
{
specs->short_p = true;
specs->locations[cdw_short] = loc;
}
break;
case RID_SIGNED:
dupe = specs->signed_p;
if (specs->unsigned_p)
error_at (loc,
("both %<signed%> and %<unsigned%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_auto_type)
error_at (loc,
("both %<signed%> and %<__auto_type%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_void)
error_at (loc,
("both %<signed%> and %<void%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_bool)
error_at (loc,
("both %<signed%> and %<_Bool%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_float)
error_at (loc,
("both %<signed%> and %<float%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_double)
error_at (loc,
("both %<signed%> and %<double%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_floatn_nx)
error_at (loc,
("both %<signed%> and %<_Float%d%s%> in "
"declaration specifiers"),
floatn_nx_types[specs->floatn_nx_idx].n,
(floatn_nx_types[specs->floatn_nx_idx].extended
? "x"
: ""));
else if (specs->typespec_word == cts_dfloat32)
error_at (loc,
("both %<signed%> and %<_Decimal32%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_dfloat64)
error_at (loc,
("both %<signed%> and %<_Decimal64%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_dfloat128)
error_at (loc,
("both %<signed%> and %<_Decimal128%> in "
"declaration specifiers"));
else
{
specs->signed_p = true;
specs->locations[cdw_signed] = loc;
}
break;
case RID_UNSIGNED:
dupe = specs->unsigned_p;
if (specs->signed_p)
error_at (loc,
("both %<signed%> and %<unsigned%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_auto_type)
error_at (loc,
("both %<unsigned%> and %<__auto_type%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_void)
error_at (loc,
("both %<unsigned%> and %<void%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_bool)
error_at (loc,
("both %<unsigned%> and %<_Bool%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_float)
error_at (loc,
("both %<unsigned%> and %<float%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_double)
error_at (loc,
("both %<unsigned%> and %<double%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_floatn_nx)
error_at (loc,
("both %<unsigned%> and %<_Float%d%s%> in "
"declaration specifiers"),
floatn_nx_types[specs->floatn_nx_idx].n,
(floatn_nx_types[specs->floatn_nx_idx].extended
? "x"
: ""));
else if (specs->typespec_word == cts_dfloat32)
error_at (loc,
("both %<unsigned%> and %<_Decimal32%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_dfloat64)
error_at (loc,
("both %<unsigned%> and %<_Decimal64%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_dfloat128)
error_at (loc,
("both %<unsigned%> and %<_Decimal128%> in "
"declaration specifiers"));
else
{
specs->unsigned_p = true;
specs->locations[cdw_unsigned] = loc;
}
break;
case RID_COMPLEX:
dupe = specs->complex_p;
if (!in_system_header_at (loc))
pedwarn_c90 (loc, OPT_Wpedantic,
"ISO C90 does not support complex types");
if (specs->typespec_word == cts_auto_type)
error_at (loc,
("both %<complex%> and %<__auto_type%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_void)
error_at (loc,
("both %<complex%> and %<void%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_bool)
error_at (loc,
("both %<complex%> and %<_Bool%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_dfloat32)
error_at (loc,
("both %<complex%> and %<_Decimal32%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_dfloat64)
error_at (loc,
("both %<complex%> and %<_Decimal64%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_dfloat128)
error_at (loc,
("both %<complex%> and %<_Decimal128%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_fract)
error_at (loc,
("both %<complex%> and %<_Fract%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_accum)
error_at (loc,
("both %<complex%> and %<_Accum%> in "
"declaration specifiers"));
else if (specs->saturating_p)
error_at (loc,
("both %<complex%> and %<_Sat%> in "
"declaration specifiers"));
else
{
specs->complex_p = true;
specs->locations[cdw_complex] = loc;
}
break;
case RID_SAT:
dupe = specs->saturating_p;
pedwarn (loc, OPT_Wpedantic,
"ISO C does not support saturating types");
if (specs->typespec_word == cts_int_n)
{
error_at (loc,
("both %<_Sat%> and %<__int%d%> in "
"declaration specifiers"),
int_n_data[specs->int_n_idx].bitsize);
}
else if (specs->typespec_word == cts_auto_type)
error_at (loc,
("both %<_Sat%> and %<__auto_type%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_void)
error_at (loc,
("both %<_Sat%> and %<void%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_bool)
error_at (loc,
("both %<_Sat%> and %<_Bool%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_char)
error_at (loc,
("both %<_Sat%> and %<char%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_int)
error_at (loc,
("both %<_Sat%> and %<int%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_float)
error_at (loc,
("both %<_Sat%> and %<float%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_double)
error_at (loc,
("both %<_Sat%> and %<double%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_floatn_nx)
error_at (loc,
("both %<_Sat%> and %<_Float%d%s%> in "
"declaration specifiers"),
floatn_nx_types[specs->floatn_nx_idx].n,
(floatn_nx_types[specs->floatn_nx_idx].extended
? "x"
: ""));
else if (specs->typespec_word == cts_dfloat32)
error_at (loc,
("both %<_Sat%> and %<_Decimal32%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_dfloat64)
error_at (loc,
("both %<_Sat%> and %<_Decimal64%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_dfloat128)
error_at (loc,
("both %<_Sat%> and %<_Decimal128%> in "
"declaration specifiers"));
else if (specs->complex_p)
error_at (loc,
("both %<_Sat%> and %<complex%> in "
"declaration specifiers"));
else
{
specs->saturating_p = true;
specs->locations[cdw_saturating] = loc;
}
break;
default:
gcc_unreachable ();
}
if (dupe)
error_at (loc, "duplicate %qE", type);
return specs;
}
else
{
/* "void", "_Bool", "char", "int", "float", "double",
"_FloatN", "_FloatNx", "_Decimal32", "__intN",
"_Decimal64", "_Decimal128", "_Fract", "_Accum" or
"__auto_type". */
if (specs->typespec_word != cts_none)
{
error_at (loc,
"two or more data types in declaration specifiers");
return specs;
}
switch (i)
{
case RID_AUTO_TYPE:
if (specs->long_p)
error_at (loc,
("both %<long%> and %<__auto_type%> in "
"declaration specifiers"));
else if (specs->short_p)
error_at (loc,
("both %<short%> and %<__auto_type%> in "
"declaration specifiers"));
else if (specs->signed_p)
error_at (loc,
("both %<signed%> and %<__auto_type%> in "
"declaration specifiers"));
else if (specs->unsigned_p)
error_at (loc,
("both %<unsigned%> and %<__auto_type%> in "
"declaration specifiers"));
else if (specs->complex_p)
error_at (loc,
("both %<complex%> and %<__auto_type%> in "
"declaration specifiers"));
else if (specs->saturating_p)
error_at (loc,
("both %<_Sat%> and %<__auto_type%> in "
"declaration specifiers"));
else
{
specs->typespec_word = cts_auto_type;
specs->locations[cdw_typespec] = loc;
}
return specs;
case RID_INT_N_0:
case RID_INT_N_1:
case RID_INT_N_2:
case RID_INT_N_3:
specs->int_n_idx = i - RID_INT_N_0;
if (!in_system_header_at (input_location))
pedwarn (loc, OPT_Wpedantic,
"ISO C does not support %<__int%d%> types",
int_n_data[specs->int_n_idx].bitsize);
if (specs->long_p)
error_at (loc,
("both %<__int%d%> and %<long%> in "
"declaration specifiers"),
int_n_data[specs->int_n_idx].bitsize);
else if (specs->saturating_p)
error_at (loc,
("both %<_Sat%> and %<__int%d%> in "
"declaration specifiers"),
int_n_data[specs->int_n_idx].bitsize);
else if (specs->short_p)
error_at (loc,
("both %<__int%d%> and %<short%> in "
"declaration specifiers"),
int_n_data[specs->int_n_idx].bitsize);
else if (! int_n_enabled_p[specs->int_n_idx])
{
specs->typespec_word = cts_int_n;
error_at (loc,
"%<__int%d%> is not supported on this target",
int_n_data[specs->int_n_idx].bitsize);
}
else
{
specs->typespec_word = cts_int_n;
specs->locations[cdw_typespec] = loc;
}
return specs;
case RID_VOID:
if (specs->long_p)
error_at (loc,
("both %<long%> and %<void%> in "
"declaration specifiers"));
else if (specs->short_p)
error_at (loc,
("both %<short%> and %<void%> in "
"declaration specifiers"));
else if (specs->signed_p)
error_at (loc,
("both %<signed%> and %<void%> in "
"declaration specifiers"));
else if (specs->unsigned_p)
error_at (loc,
("both %<unsigned%> and %<void%> in "
"declaration specifiers"));
else if (specs->complex_p)
error_at (loc,
("both %<complex%> and %<void%> in "
"declaration specifiers"));
else if (specs->saturating_p)
error_at (loc,
("both %<_Sat%> and %<void%> in "
"declaration specifiers"));
else
{
specs->typespec_word = cts_void;
specs->locations[cdw_typespec] = loc;
}
return specs;
case RID_BOOL:
if (!in_system_header_at (loc))
pedwarn_c90 (loc, OPT_Wpedantic,
"ISO C90 does not support boolean types");
if (specs->long_p)
error_at (loc,
("both %<long%> and %<_Bool%> in "
"declaration specifiers"));
else if (specs->short_p)
error_at (loc,
("both %<short%> and %<_Bool%> in "
"declaration specifiers"));
else if (specs->signed_p)
error_at (loc,
("both %<signed%> and %<_Bool%> in "
"declaration specifiers"));
else if (specs->unsigned_p)
error_at (loc,
("both %<unsigned%> and %<_Bool%> in "
"declaration specifiers"));
else if (specs->complex_p)
error_at (loc,
("both %<complex%> and %<_Bool%> in "
"declaration specifiers"));
else if (specs->saturating_p)
error_at (loc,
("both %<_Sat%> and %<_Bool%> in "
"declaration specifiers"));
else
{
specs->typespec_word = cts_bool;
specs->locations[cdw_typespec] = loc;
}
return specs;
case RID_CHAR:
if (specs->long_p)
error_at (loc,
("both %<long%> and %<char%> in "
"declaration specifiers"));
else if (specs->short_p)
error_at (loc,
("both %<short%> and %<char%> in "
"declaration specifiers"));
else if (specs->saturating_p)
error_at (loc,
("both %<_Sat%> and %<char%> in "
"declaration specifiers"));
else
{
specs->typespec_word = cts_char;
specs->locations[cdw_typespec] = loc;
}
return specs;
case RID_INT:
if (specs->saturating_p)
error_at (loc,
("both %<_Sat%> and %<int%> in "
"declaration specifiers"));
else
{
specs->typespec_word = cts_int;
specs->locations[cdw_typespec] = loc;
}
return specs;
case RID_FLOAT:
if (specs->long_p)
error_at (loc,
("both %<long%> and %<float%> in "
"declaration specifiers"));
else if (specs->short_p)
error_at (loc,
("both %<short%> and %<float%> in "
"declaration specifiers"));
else if (specs->signed_p)
error_at (loc,
("both %<signed%> and %<float%> in "
"declaration specifiers"));
else if (specs->unsigned_p)
error_at (loc,
("both %<unsigned%> and %<float%> in "
"declaration specifiers"));
else if (specs->saturating_p)
error_at (loc,
("both %<_Sat%> and %<float%> in "
"declaration specifiers"));
else
{
specs->typespec_word = cts_float;
specs->locations[cdw_typespec] = loc;
}
return specs;
case RID_DOUBLE:
if (specs->long_long_p)
error_at (loc,
("both %<long long%> and %<double%> in "
"declaration specifiers"));
else if (specs->short_p)
error_at (loc,
("both %<short%> and %<double%> in "
"declaration specifiers"));
else if (specs->signed_p)
error_at (loc,
("both %<signed%> and %<double%> in "
"declaration specifiers"));
else if (specs->unsigned_p)
error_at (loc,
("both %<unsigned%> and %<double%> in "
"declaration specifiers"));
else if (specs->saturating_p)
error_at (loc,
("both %<_Sat%> and %<double%> in "
"declaration specifiers"));
else
{
specs->typespec_word = cts_double;
specs->locations[cdw_typespec] = loc;
}
return specs;
CASE_RID_FLOATN_NX:
specs->floatn_nx_idx = i - RID_FLOATN_NX_FIRST;
if (!in_system_header_at (input_location))
pedwarn (loc, OPT_Wpedantic,
"ISO C does not support the %<_Float%d%s%> type",
floatn_nx_types[specs->floatn_nx_idx].n,
(floatn_nx_types[specs->floatn_nx_idx].extended
? "x"
: ""));
if (specs->long_p)
error_at (loc,
("both %<long%> and %<_Float%d%s%> in "
"declaration specifiers"),
floatn_nx_types[specs->floatn_nx_idx].n,
(floatn_nx_types[specs->floatn_nx_idx].extended
? "x"
: ""));
else if (specs->short_p)
error_at (loc,
("both %<short%> and %<_Float%d%s%> in "
"declaration specifiers"),
floatn_nx_types[specs->floatn_nx_idx].n,
(floatn_nx_types[specs->floatn_nx_idx].extended
? "x"
: ""));
else if (specs->signed_p)
error_at (loc,
("both %<signed%> and %<_Float%d%s%> in "
"declaration specifiers"),
floatn_nx_types[specs->floatn_nx_idx].n,
(floatn_nx_types[specs->floatn_nx_idx].extended
? "x"
: ""));
else if (specs->unsigned_p)
error_at (loc,
("both %<unsigned%> and %<_Float%d%s%> in "
"declaration specifiers"),
floatn_nx_types[specs->floatn_nx_idx].n,
(floatn_nx_types[specs->floatn_nx_idx].extended
? "x"
: ""));
else if (specs->saturating_p)
error_at (loc,
("both %<_Sat%> and %<_Float%d%s%> in "
"declaration specifiers"),
floatn_nx_types[specs->floatn_nx_idx].n,
(floatn_nx_types[specs->floatn_nx_idx].extended
? "x"
: ""));
else if (FLOATN_NX_TYPE_NODE (specs->floatn_nx_idx) == NULL_TREE)
{
specs->typespec_word = cts_floatn_nx;
error_at (loc,
"%<_Float%d%s%> is not supported on this target",
floatn_nx_types[specs->floatn_nx_idx].n,
(floatn_nx_types[specs->floatn_nx_idx].extended
? "x"
: ""));
}
else
{
specs->typespec_word = cts_floatn_nx;
specs->locations[cdw_typespec] = loc;
}
return specs;
case RID_DFLOAT32:
case RID_DFLOAT64:
case RID_DFLOAT128:
{
const char *str;
if (i == RID_DFLOAT32)
str = "_Decimal32";
else if (i == RID_DFLOAT64)
str = "_Decimal64";
else
str = "_Decimal128";
if (specs->long_long_p)
error_at (loc,
("both %<long long%> and %qs in "
"declaration specifiers"),
str);
if (specs->long_p)
error_at (loc,
("both %<long%> and %qs in "
"declaration specifiers"),
str);
else if (specs->short_p)
error_at (loc,
("both %<short%> and %qs in "
"declaration specifiers"),
str);
else if (specs->signed_p)
error_at (loc,
("both %<signed%> and %qs in "
"declaration specifiers"),
str);
else if (specs->unsigned_p)
error_at (loc,
("both %<unsigned%> and %qs in "
"declaration specifiers"),
str);
else if (specs->complex_p)
error_at (loc,
("both %<complex%> and %qs in "
"declaration specifiers"),
str);
else if (specs->saturating_p)
error_at (loc,
("both %<_Sat%> and %qs in "
"declaration specifiers"),
str);
else if (i == RID_DFLOAT32)
specs->typespec_word = cts_dfloat32;
else if (i == RID_DFLOAT64)
specs->typespec_word = cts_dfloat64;
else
specs->typespec_word = cts_dfloat128;
specs->locations[cdw_typespec] = loc;
}
if (!targetm.decimal_float_supported_p ())
error_at (loc,
("decimal floating point not supported "
"for this target"));
pedwarn (loc, OPT_Wpedantic,
"ISO C does not support decimal floating point");
return specs;
case RID_FRACT:
case RID_ACCUM:
{
const char *str;
if (i == RID_FRACT)
str = "_Fract";
else
str = "_Accum";
if (specs->complex_p)
error_at (loc,
("both %<complex%> and %qs in "
"declaration specifiers"),
str);
else if (i == RID_FRACT)
specs->typespec_word = cts_fract;
else
specs->typespec_word = cts_accum;
specs->locations[cdw_typespec] = loc;
}
if (!targetm.fixed_point_supported_p ())
error_at (loc,
"fixed-point types not supported for this target");
pedwarn (loc, OPT_Wpedantic,
"ISO C does not support fixed-point types");
return specs;
default:
/* ObjC reserved word "id", handled below. */
break;
}
}
}
/* Now we have a typedef (a TYPE_DECL node), an identifier (some
form of ObjC type, cases such as "int" and "long" being handled
above), a TYPE (struct, union, enum and typeof specifiers) or an
ERROR_MARK. In none of these cases may there have previously
been any type specifiers. */
if (specs->type || specs->typespec_word != cts_none
|| specs->long_p || specs->short_p || specs->signed_p
|| specs->unsigned_p || specs->complex_p)
error_at (loc, "two or more data types in declaration specifiers");
else if (TREE_CODE (type) == TYPE_DECL)
{
if (TREE_TYPE (type) == error_mark_node)
; /* Allow the type to default to int to avoid cascading errors. */
else
{
specs->type = TREE_TYPE (type);
specs->decl_attr = DECL_ATTRIBUTES (type);
specs->typedef_p = true;
specs->explicit_signed_p = C_TYPEDEF_EXPLICITLY_SIGNED (type);
specs->locations[cdw_typedef] = loc;
/* If this typedef name is defined in a struct, then a C++
lookup would return a different value. */
if (warn_cxx_compat
&& I_SYMBOL_BINDING (DECL_NAME (type))->in_struct)
warning_at (loc, OPT_Wc___compat,
"C++ lookup of %qD would return a field, not a type",
type);
/* If we are parsing a struct, record that a struct field
used a typedef. */
if (warn_cxx_compat && struct_parse_info != NULL)
struct_parse_info->typedefs_seen.safe_push (type);
}
}
else if (TREE_CODE (type) == IDENTIFIER_NODE)
{
tree t = lookup_name (type);
if (!t || TREE_CODE (t) != TYPE_DECL)
error_at (loc, "%qE fails to be a typedef or built in type", type);
else if (TREE_TYPE (t) == error_mark_node)
;
else
{
specs->type = TREE_TYPE (t);
specs->locations[cdw_typespec] = loc;
}
}
else
{
if (TREE_CODE (type) != ERROR_MARK && spec.kind == ctsk_typeof)
{
specs->typedef_p = true;
specs->locations[cdw_typedef] = loc;
if (spec.expr)
{
if (specs->expr)
specs->expr = build2 (COMPOUND_EXPR, TREE_TYPE (spec.expr),
specs->expr, spec.expr);
else
specs->expr = spec.expr;
specs->expr_const_operands &= spec.expr_const_operands;
}
}
specs->type = type;
}
return specs;
}
/* Add the storage class specifier or function specifier SCSPEC to the
declaration specifiers SPECS, returning SPECS. */
struct c_declspecs *
declspecs_add_scspec (source_location loc,
struct c_declspecs *specs,
tree scspec)
{
enum rid i;
enum c_storage_class n = csc_none;
bool dupe = false;
specs->declspecs_seen_p = true;
gcc_assert (TREE_CODE (scspec) == IDENTIFIER_NODE
&& C_IS_RESERVED_WORD (scspec));
i = C_RID_CODE (scspec);
if (specs->non_sc_seen_p)
warning (OPT_Wold_style_declaration,
"%qE is not at beginning of declaration", scspec);
switch (i)
{
case RID_INLINE:
/* C99 permits duplicate inline. Although of doubtful utility,
it seems simplest to permit it in gnu89 mode as well, as
there is also little utility in maintaining this as a
difference between gnu89 and C99 inline. */
dupe = false;
specs->inline_p = true;
specs->locations[cdw_inline] = loc;
break;
case RID_NORETURN:
/* Duplicate _Noreturn is permitted. */
dupe = false;
specs->noreturn_p = true;
specs->locations[cdw_noreturn] = loc;
break;
case RID_THREAD:
dupe = specs->thread_p;
if (specs->storage_class == csc_auto)
error ("%qE used with %<auto%>", scspec);
else if (specs->storage_class == csc_register)
error ("%qE used with %<register%>", scspec);
else if (specs->storage_class == csc_typedef)
error ("%qE used with %<typedef%>", scspec);
else
{
specs->thread_p = true;
specs->thread_gnu_p = (strcmp (IDENTIFIER_POINTER (scspec),
"__thread") == 0);
/* A diagnostic is not required for the use of this
identifier in the implementation namespace; only diagnose
it for the C11 spelling because of existing code using
the other spelling. */
if (!specs->thread_gnu_p)
{
if (flag_isoc99)
pedwarn_c99 (loc, OPT_Wpedantic,
"ISO C99 does not support %qE", scspec);
else
pedwarn_c99 (loc, OPT_Wpedantic,
"ISO C90 does not support %qE", scspec);
}
specs->locations[cdw_thread] = loc;
}
break;
case RID_AUTO:
n = csc_auto;
break;
case RID_EXTERN:
n = csc_extern;
/* Diagnose "__thread extern". */
if (specs->thread_p && specs->thread_gnu_p)
error ("%<__thread%> before %<extern%>");
break;
case RID_REGISTER:
n = csc_register;
break;
case RID_STATIC:
n = csc_static;
/* Diagnose "__thread static". */
if (specs->thread_p && specs->thread_gnu_p)
error ("%<__thread%> before %<static%>");
break;
case RID_TYPEDEF:
n = csc_typedef;
break;
default:
gcc_unreachable ();
}
if (n != csc_none && n == specs->storage_class)
dupe = true;
if (dupe)
{
if (i == RID_THREAD)
error ("duplicate %<_Thread_local%> or %<__thread%>");
else
error ("duplicate %qE", scspec);
}
if (n != csc_none)
{
if (specs->storage_class != csc_none && n != specs->storage_class)
{
error ("multiple storage classes in declaration specifiers");
}
else
{
specs->storage_class = n;
specs->locations[cdw_storage_class] = loc;
if (n != csc_extern && n != csc_static && specs->thread_p)
{
error ("%qs used with %qE",
specs->thread_gnu_p ? "__thread" : "_Thread_local",
scspec);
specs->thread_p = false;
}
}
}
return specs;
}
/* Add the attributes ATTRS to the declaration specifiers SPECS,
returning SPECS. */
struct c_declspecs *
declspecs_add_attrs (source_location loc, struct c_declspecs *specs, tree attrs)
{
specs->attrs = chainon (attrs, specs->attrs);
specs->locations[cdw_attributes] = loc;
specs->declspecs_seen_p = true;
return specs;
}
/* Add an _Alignas specifier (expression ALIGN, or type whose
alignment is ALIGN) to the declaration specifiers SPECS, returning
SPECS. */
struct c_declspecs *
declspecs_add_alignas (source_location loc,
struct c_declspecs *specs, tree align)
{
int align_log;
specs->alignas_p = true;
specs->locations[cdw_alignas] = loc;
if (align == error_mark_node)
return specs;
align_log = check_user_alignment (align, true);
if (align_log > specs->align_log)
specs->align_log = align_log;
return specs;
}
/* Combine "long", "short", "signed", "unsigned" and "_Complex" type
specifiers with any other type specifier to determine the resulting
type. This is where ISO C checks on complex types are made, since
"_Complex long" is a prefix of the valid ISO C type "_Complex long
double". */
struct c_declspecs *
finish_declspecs (struct c_declspecs *specs)
{
/* If a type was specified as a whole, we have no modifiers and are
done. */
if (specs->type != NULL_TREE)
{
gcc_assert (!specs->long_p && !specs->long_long_p && !specs->short_p
&& !specs->signed_p && !specs->unsigned_p
&& !specs->complex_p);
/* Set a dummy type. */
if (TREE_CODE (specs->type) == ERROR_MARK)
specs->type = integer_type_node;
return specs;
}
/* If none of "void", "_Bool", "char", "int", "float" or "double"
has been specified, treat it as "int" unless "_Complex" is
present and there are no other specifiers. If we just have
"_Complex", it is equivalent to "_Complex double", but e.g.
"_Complex short" is equivalent to "_Complex short int". */
if (specs->typespec_word == cts_none)
{
if (specs->saturating_p)
{
error_at (specs->locations[cdw_saturating],
"%<_Sat%> is used without %<_Fract%> or %<_Accum%>");
if (!targetm.fixed_point_supported_p ())
error_at (specs->locations[cdw_saturating],
"fixed-point types not supported for this target");
specs->typespec_word = cts_fract;
}
else if (specs->long_p || specs->short_p
|| specs->signed_p || specs->unsigned_p)
{
specs->typespec_word = cts_int;
}
else if (specs->complex_p)
{
specs->typespec_word = cts_double;
pedwarn (specs->locations[cdw_complex], OPT_Wpedantic,
"ISO C does not support plain %<complex%> meaning "
"%<double complex%>");
}
else
{
specs->typespec_word = cts_int;
specs->default_int_p = true;
/* We don't diagnose this here because grokdeclarator will
give more specific diagnostics according to whether it is
a function definition. */
}
}
/* If "signed" was specified, record this to distinguish "int" and
"signed int" in the case of a bit-field with
-funsigned-bitfields. */
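/* For example, with -funsigned-bitfields,
     struct s { int a : 3; signed int b : 3; };
   makes "a" unsigned but leaves "b" signed, which is why plain "int"
   and explicit "signed int" must remain distinguishable here.  */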
specs->explicit_signed_p = specs->signed_p;
/* Now compute the actual type. */
switch (specs->typespec_word)
{
case cts_auto_type:
gcc_assert (!specs->long_p && !specs->short_p
&& !specs->signed_p && !specs->unsigned_p
&& !specs->complex_p);
/* Type to be filled in later. */
break;
case cts_void:
gcc_assert (!specs->long_p && !specs->short_p
&& !specs->signed_p && !specs->unsigned_p
&& !specs->complex_p);
specs->type = void_type_node;
break;
case cts_bool:
gcc_assert (!specs->long_p && !specs->short_p
&& !specs->signed_p && !specs->unsigned_p
&& !specs->complex_p);
specs->type = boolean_type_node;
break;
case cts_char:
gcc_assert (!specs->long_p && !specs->short_p);
gcc_assert (!(specs->signed_p && specs->unsigned_p));
if (specs->signed_p)
specs->type = signed_char_type_node;
else if (specs->unsigned_p)
specs->type = unsigned_char_type_node;
else
specs->type = char_type_node;
if (specs->complex_p)
{
pedwarn (specs->locations[cdw_complex], OPT_Wpedantic,
"ISO C does not support complex integer types");
specs->type = build_complex_type (specs->type);
}
break;
case cts_int_n:
gcc_assert (!specs->long_p && !specs->short_p && !specs->long_long_p);
gcc_assert (!(specs->signed_p && specs->unsigned_p));
if (! int_n_enabled_p[specs->int_n_idx])
specs->type = integer_type_node;
else
specs->type = (specs->unsigned_p
? int_n_trees[specs->int_n_idx].unsigned_type
: int_n_trees[specs->int_n_idx].signed_type);
if (specs->complex_p)
{
pedwarn (specs->locations[cdw_complex], OPT_Wpedantic,
"ISO C does not support complex integer types");
specs->type = build_complex_type (specs->type);
}
break;
case cts_int:
gcc_assert (!(specs->long_p && specs->short_p));
gcc_assert (!(specs->signed_p && specs->unsigned_p));
if (specs->long_long_p)
specs->type = (specs->unsigned_p
? long_long_unsigned_type_node
: long_long_integer_type_node);
else if (specs->long_p)
specs->type = (specs->unsigned_p
? long_unsigned_type_node
: long_integer_type_node);
else if (specs->short_p)
specs->type = (specs->unsigned_p
? short_unsigned_type_node
: short_integer_type_node);
else
specs->type = (specs->unsigned_p
? unsigned_type_node
: integer_type_node);
if (specs->complex_p)
{
pedwarn (specs->locations[cdw_complex], OPT_Wpedantic,
"ISO C does not support complex integer types");
specs->type = build_complex_type (specs->type);
}
break;
case cts_float:
gcc_assert (!specs->long_p && !specs->short_p
&& !specs->signed_p && !specs->unsigned_p);
specs->type = (specs->complex_p
? complex_float_type_node
: float_type_node);
break;
case cts_double:
gcc_assert (!specs->long_long_p && !specs->short_p
&& !specs->signed_p && !specs->unsigned_p);
if (specs->long_p)
{
specs->type = (specs->complex_p
? complex_long_double_type_node
: long_double_type_node);
}
else
{
specs->type = (specs->complex_p
? complex_double_type_node
: double_type_node);
}
break;
case cts_floatn_nx:
gcc_assert (!specs->long_p && !specs->short_p
&& !specs->signed_p && !specs->unsigned_p);
if (FLOATN_NX_TYPE_NODE (specs->floatn_nx_idx) == NULL_TREE)
specs->type = integer_type_node;
else if (specs->complex_p)
specs->type = COMPLEX_FLOATN_NX_TYPE_NODE (specs->floatn_nx_idx);
else
specs->type = FLOATN_NX_TYPE_NODE (specs->floatn_nx_idx);
break;
case cts_dfloat32:
case cts_dfloat64:
case cts_dfloat128:
gcc_assert (!specs->long_p && !specs->long_long_p && !specs->short_p
&& !specs->signed_p && !specs->unsigned_p && !specs->complex_p);
if (specs->typespec_word == cts_dfloat32)
specs->type = dfloat32_type_node;
else if (specs->typespec_word == cts_dfloat64)
specs->type = dfloat64_type_node;
else
specs->type = dfloat128_type_node;
break;
case cts_fract:
gcc_assert (!specs->complex_p);
if (!targetm.fixed_point_supported_p ())
specs->type = integer_type_node;
else if (specs->saturating_p)
{
if (specs->long_long_p)
specs->type = specs->unsigned_p
? sat_unsigned_long_long_fract_type_node
: sat_long_long_fract_type_node;
else if (specs->long_p)
specs->type = specs->unsigned_p
? sat_unsigned_long_fract_type_node
: sat_long_fract_type_node;
else if (specs->short_p)
specs->type = specs->unsigned_p
? sat_unsigned_short_fract_type_node
: sat_short_fract_type_node;
else
specs->type = specs->unsigned_p
? sat_unsigned_fract_type_node
: sat_fract_type_node;
}
else
{
if (specs->long_long_p)
specs->type = specs->unsigned_p
? unsigned_long_long_fract_type_node
: long_long_fract_type_node;
else if (specs->long_p)
specs->type = specs->unsigned_p
? unsigned_long_fract_type_node
: long_fract_type_node;
else if (specs->short_p)
specs->type = specs->unsigned_p
? unsigned_short_fract_type_node
: short_fract_type_node;
else
specs->type = specs->unsigned_p
? unsigned_fract_type_node
: fract_type_node;
}
break;
case cts_accum:
gcc_assert (!specs->complex_p);
if (!targetm.fixed_point_supported_p ())
specs->type = integer_type_node;
else if (specs->saturating_p)
{
if (specs->long_long_p)
specs->type = specs->unsigned_p
? sat_unsigned_long_long_accum_type_node
: sat_long_long_accum_type_node;
else if (specs->long_p)
specs->type = specs->unsigned_p
? sat_unsigned_long_accum_type_node
: sat_long_accum_type_node;
else if (specs->short_p)
specs->type = specs->unsigned_p
? sat_unsigned_short_accum_type_node
: sat_short_accum_type_node;
else
specs->type = specs->unsigned_p
? sat_unsigned_accum_type_node
: sat_accum_type_node;
}
else
{
if (specs->long_long_p)
specs->type = specs->unsigned_p
? unsigned_long_long_accum_type_node
: long_long_accum_type_node;
else if (specs->long_p)
specs->type = specs->unsigned_p
? unsigned_long_accum_type_node
: long_accum_type_node;
else if (specs->short_p)
specs->type = specs->unsigned_p
? unsigned_short_accum_type_node
: short_accum_type_node;
else
specs->type = specs->unsigned_p
? unsigned_accum_type_node
: accum_type_node;
}
break;
default:
gcc_unreachable ();
}
return specs;
}
/* Perform final processing on one file scope's declarations (or the
external scope's declarations), GLOBALS. */
static void
c_write_global_declarations_1 (tree globals)
{
tree decl;
bool reconsider;
/* Process the decls in the order they were written. */
for (decl = globals; decl; decl = DECL_CHAIN (decl))
{
/* Check for used but undefined static functions using the C
standard's definition of "used", and set TREE_NO_WARNING so
that check_global_declaration doesn't repeat the check. */
if (TREE_CODE (decl) == FUNCTION_DECL
&& DECL_INITIAL (decl) == NULL_TREE
&& DECL_EXTERNAL (decl)
&& !TREE_PUBLIC (decl))
{
if (C_DECL_USED (decl))
{
pedwarn (input_location, 0, "%q+F used but never defined", decl);
TREE_NO_WARNING (decl) = 1;
}
/* For -Wunused-function warn about unused static prototypes. */
else if (warn_unused_function
&& ! DECL_ARTIFICIAL (decl)
&& ! TREE_NO_WARNING (decl))
{
warning (OPT_Wunused_function,
"%q+F declared %<static%> but never defined", decl);
TREE_NO_WARNING (decl) = 1;
}
}
wrapup_global_declaration_1 (decl);
}
do
{
reconsider = false;
for (decl = globals; decl; decl = DECL_CHAIN (decl))
reconsider |= wrapup_global_declaration_2 (decl);
}
while (reconsider);
}
/* Callback to collect a source_ref from a DECL. */
static void
collect_source_ref_cb (tree decl)
{
if (!DECL_IS_BUILTIN (decl))
collect_source_ref (LOCATION_FILE (decl_sloc (decl, false)));
}
/* Preserve the external declarations scope across a garbage collect. */
static GTY(()) tree ext_block;
/* Collect all references relevant to SOURCE_FILE. */
static void
collect_all_refs (const char *source_file)
{
tree t;
unsigned i;
FOR_EACH_VEC_ELT (*all_translation_units, i, t)
collect_ada_nodes (BLOCK_VARS (DECL_INITIAL (t)), source_file);
collect_ada_nodes (BLOCK_VARS (ext_block), source_file);
}
/* Iterate over all global declarations and call CALLBACK. */
static void
for_each_global_decl (void (*callback) (tree decl))
{
tree t;
tree decls;
tree decl;
unsigned i;
FOR_EACH_VEC_ELT (*all_translation_units, i, t)
{
decls = DECL_INITIAL (t);
for (decl = BLOCK_VARS (decls); decl; decl = TREE_CHAIN (decl))
callback (decl);
}
for (decl = BLOCK_VARS (ext_block); decl; decl = TREE_CHAIN (decl))
callback (decl);
}
/* Perform any final parser cleanups and generate initial debugging
information. */
void
c_parse_final_cleanups (void)
{
tree t;
unsigned i;
/* We don't want to do this if generating a PCH. */
if (pch_file)
return;
timevar_stop (TV_PHASE_PARSING);
timevar_start (TV_PHASE_DEFERRED);
/* Do the Objective-C stuff. This is where all the Objective-C
module stuff gets generated (symtab, class/protocol/selector
lists etc). */
if (c_dialect_objc ())
objc_write_global_declarations ();
/* Close the external scope. */
ext_block = pop_scope ();
external_scope = 0;
gcc_assert (!current_scope);
/* Handle -fdump-ada-spec[-slim]. */
if (flag_dump_ada_spec || flag_dump_ada_spec_slim)
{
/* Build a table of files to generate specs for. */
if (flag_dump_ada_spec_slim)
collect_source_ref (main_input_filename);
else
for_each_global_decl (collect_source_ref_cb);
dump_ada_specs (collect_all_refs, NULL);
}
/* Process all file scopes in this compilation, and the external_scope,
through wrapup_global_declarations. */
FOR_EACH_VEC_ELT (*all_translation_units, i, t)
c_write_global_declarations_1 (BLOCK_VARS (DECL_INITIAL (t)));
c_write_global_declarations_1 (BLOCK_VARS (ext_block));
timevar_stop (TV_PHASE_DEFERRED);
timevar_start (TV_PHASE_PARSING);
ext_block = NULL;
}
/* Register reserved keyword WORD as qualifier for address space AS. */
void
c_register_addr_space (const char *word, addr_space_t as)
{
int rid = RID_FIRST_ADDR_SPACE + as;
tree id;
/* Address space qualifiers are only supported
in C with GNU extensions enabled. */
if (c_dialect_objc () || flag_no_asm)
return;
id = get_identifier (word);
C_SET_RID_CODE (id, rid);
C_IS_RESERVED_WORD (id) = 1;
ridpointers [rid] = id;
}
/* Return identifier to look up for omp declare reduction. */
tree
c_omp_reduction_id (enum tree_code reduction_code, tree reduction_id)
{
const char *p = NULL;
switch (reduction_code)
{
case PLUS_EXPR: p = "+"; break;
case MULT_EXPR: p = "*"; break;
case MINUS_EXPR: p = "-"; break;
case BIT_AND_EXPR: p = "&"; break;
case BIT_XOR_EXPR: p = "^"; break;
case BIT_IOR_EXPR: p = "|"; break;
case TRUTH_ANDIF_EXPR: p = "&&"; break;
case TRUTH_ORIF_EXPR: p = "||"; break;
case MIN_EXPR: p = "min"; break;
case MAX_EXPR: p = "max"; break;
default:
break;
}
if (p == NULL)
{
if (TREE_CODE (reduction_id) != IDENTIFIER_NODE)
return error_mark_node;
p = IDENTIFIER_POINTER (reduction_id);
}
const char prefix[] = "omp declare reduction ";
size_t lenp = sizeof (prefix);
size_t len = strlen (p);
char *name = XALLOCAVEC (char, lenp + len);
memcpy (name, prefix, lenp - 1);
memcpy (name + lenp - 1, p, len + 1);
return get_identifier (name);
}
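/* For example, for "#pragma omp declare reduction (foo : ...)" the
   identifier built above is "omp declare reduction foo"; for the
   built-in "+" operator it is "omp declare reduction +".  */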
/* Lookup REDUCTION_ID in the current scope, or create an artificial
VAR_DECL, bind it into the current scope and return it. */
tree
c_omp_reduction_decl (tree reduction_id)
{
struct c_binding *b = I_SYMBOL_BINDING (reduction_id);
if (b != NULL && B_IN_CURRENT_SCOPE (b))
return b->decl;
tree decl = build_decl (BUILTINS_LOCATION, VAR_DECL,
reduction_id, integer_type_node);
DECL_ARTIFICIAL (decl) = 1;
DECL_EXTERNAL (decl) = 1;
TREE_STATIC (decl) = 1;
TREE_PUBLIC (decl) = 0;
bind (reduction_id, decl, current_scope, true, false, BUILTINS_LOCATION);
return decl;
}
/* Lookup REDUCTION_ID in the first scope where it has entry for TYPE. */
tree
c_omp_reduction_lookup (tree reduction_id, tree type)
{
struct c_binding *b = I_SYMBOL_BINDING (reduction_id);
while (b)
{
tree t;
for (t = DECL_INITIAL (b->decl); t; t = TREE_CHAIN (t))
if (comptypes (TREE_PURPOSE (t), type))
return TREE_VALUE (t);
b = b->shadowed;
}
return error_mark_node;
}
/* Helper function called via walk_tree, to diagnose invalid
#pragma omp declare reduction combiners or initializers. */
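/* For example, in
     #pragma omp declare reduction (+ : T : omp_out += v)
   the combiner refers to "v", which is neither omp_out nor omp_in,
   so the walk below reports it.  */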
tree
c_check_omp_declare_reduction_r (tree *tp, int *, void *data)
{
tree *vars = (tree *) data;
if (SSA_VAR_P (*tp)
&& !DECL_ARTIFICIAL (*tp)
&& *tp != vars[0]
&& *tp != vars[1])
{
location_t loc = DECL_SOURCE_LOCATION (vars[0]);
if (strcmp (IDENTIFIER_POINTER (DECL_NAME (vars[0])), "omp_out") == 0)
error_at (loc, "%<#pragma omp declare reduction%> combiner refers to "
"variable %qD which is not %<omp_out%> nor %<omp_in%>",
*tp);
else
error_at (loc, "%<#pragma omp declare reduction%> initializer refers "
"to variable %qD which is not %<omp_priv%> nor "
"%<omp_orig%>",
*tp);
return *tp;
}
return NULL_TREE;
}
#include "gt-c-c-decl.h"
|
rom_residuals_utility.h | // | / |
// ' / __| _` | __| _ \ __|
// . \ | ( | | ( |\__ \.
// _|\_\_| \__,_|\__|\___/ ____/
// Multi-Physics
//
// License: BSD License
// Kratos default license: kratos/license.txt
//
// Main authors: RAUL BRAVO
//
#if !defined( ROM_RESIDUALS_UTILITY_H_INCLUDED )
#define ROM_RESIDUALS_UTILITY_H_INCLUDED
/* Project includes */
#include "includes/define.h"
#include "includes/model_part.h"
#include "solving_strategies/schemes/scheme.h"
#include "spaces/ublas_space.h"
/* Application includes */
#include "rom_application_variables.h"
#include "custom_utilities/rom_auxiliary_utilities.h"
namespace Kratos
{
typedef UblasSpace<double, CompressedMatrix, boost::numeric::ublas::vector<double>> SparseSpaceType;
typedef UblasSpace<double, Matrix, Vector> LocalSpaceType;
typedef Scheme<SparseSpaceType, LocalSpaceType> BaseSchemeType;
// This utility returns the converged residuals projected onto the ROM basis Phi.
class RomResidualsUtility
{
public:
KRATOS_CLASS_POINTER_DEFINITION(RomResidualsUtility);
RomResidualsUtility(
ModelPart& rModelPart,
Parameters ThisParameters,
BaseSchemeType::Pointer pScheme
): mpModelPart(rModelPart), mpScheme(pScheme){
// Validate default parameters
Parameters default_parameters = Parameters(R"(
{
"nodal_unknowns" : [],
"number_of_rom_dofs" : 10
})" );
ThisParameters.ValidateAndAssignDefaults(default_parameters);
mNodalVariablesNames = ThisParameters["nodal_unknowns"].GetStringArray();
mNodalDofs = mNodalVariablesNames.size();
mRomDofs = ThisParameters["number_of_rom_dofs"].GetInt();
// Setting up mapping: VARIABLE_KEY --> CORRECT_ROW_IN_BASIS
for(int k=0; k<mNodalDofs; k++){
if(KratosComponents<Variable<double>>::Has(mNodalVariablesNames[k]))
{
const auto& var = KratosComponents<Variable<double>>::Get(mNodalVariablesNames[k]);
MapPhi[var.Key()] = k;
}
else
KRATOS_ERROR << "variable \""<< mNodalVariablesNames[k] << "\" not valid" << std::endl;
}
}
~RomResidualsUtility()= default;
Matrix Calculate()
{
// Getting the number of elements and conditions from the model
const int nelements = static_cast<int>(mpModelPart.Elements().size());
const int nconditions = static_cast<int>(mpModelPart.Conditions().size());
const auto& CurrentProcessInfo = mpModelPart.GetProcessInfo();
const auto el_begin = mpModelPart.ElementsBegin();
const auto cond_begin = mpModelPart.ConditionsBegin();
//contributions to the system
Matrix LHS_Contribution = ZeroMatrix(0, 0);
Vector RHS_Contribution = ZeroVector(0);
//vector containing the localization in the system of the different terms
Element::EquationIdVectorType EquationId;
Matrix MatrixResiduals( (nelements + nconditions), mRomDofs); // Matrix of reduced residuals.
Matrix PhiElemental;
#pragma omp parallel firstprivate(nelements, nconditions, LHS_Contribution, RHS_Contribution, EquationId, PhiElemental, el_begin, cond_begin)
{
#pragma omp for nowait
for (int k = 0; k < nelements; k++){
auto it_el = el_begin + k;
//detect if the element is active or not. If the user did not make any choice the element is active by default
bool element_is_active = true;
if ((it_el)->IsDefined(ACTIVE))
element_is_active = (it_el)->Is(ACTIVE);
if (element_is_active){
//calculate elemental contribution
mpScheme->CalculateSystemContributions(*it_el, LHS_Contribution, RHS_Contribution, EquationId, CurrentProcessInfo);
Element::DofsVectorType dofs;
it_el->GetDofList(dofs, CurrentProcessInfo);
//assemble the elemental contribution - here is where the ROM acts
//compute the elemental reduction matrix PhiElemental
const auto& geom = it_el->GetGeometry();
if(PhiElemental.size1() != dofs.size() || PhiElemental.size2() != mRomDofs)
PhiElemental.resize(dofs.size(), mRomDofs,false);
RomAuxiliaryUtilities::GetPhiElemental(PhiElemental, dofs, geom, MapPhi);
noalias(row(MatrixResiduals, k)) = prod(trans(PhiElemental), RHS_Contribution); // Row width equals the number of ROM modes; one row per element
}
}
#pragma omp for nowait
for (int k = 0; k < nconditions; k++){
ModelPart::ConditionsContainerType::iterator it = cond_begin + k;
//detect if the condition is active or not. If the user did not make any choice the condition is active by default
bool condition_is_active = true;
if ((it)->IsDefined(ACTIVE))
condition_is_active = (it)->Is(ACTIVE);
if (condition_is_active){
Condition::DofsVectorType dofs;
it->GetDofList(dofs, CurrentProcessInfo);
//calculate elemental contribution
mpScheme->CalculateSystemContributions(*it, LHS_Contribution, RHS_Contribution, EquationId, CurrentProcessInfo);
//assemble the elemental contribution - here is where the ROM acts
//compute the elemental reduction matrix PhiElemental
const auto& geom = it->GetGeometry();
if(PhiElemental.size1() != dofs.size() || PhiElemental.size2() != mRomDofs)
PhiElemental.resize(dofs.size(), mRomDofs,false);
RomAuxiliaryUtilities::GetPhiElemental(PhiElemental, dofs, geom, MapPhi);
noalias(row(MatrixResiduals, k+nelements)) = prod(trans(PhiElemental), RHS_Contribution); // Row width equals the number of ROM modes; one row per condition
}
}
}
return MatrixResiduals;
}
protected:
std::vector< std::string > mNodalVariablesNames;
int mNodalDofs;
unsigned int mRomDofs;
ModelPart& mpModelPart;
BaseSchemeType::Pointer mpScheme;
std::unordered_map<Kratos::VariableData::KeyType, Matrix::size_type> MapPhi;
};
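// Editor's illustrative sketch (hypothetical helper, not part of Kratos):
// the projection performed inside Calculate() is r_rom = Phi_e^T * r_e for
// each element/condition. Written with raw arrays and plain loops to make
// the indexing explicit; the basis block is stored row-major (ndofs x nrom).
inline void ExampleProjectElementalResidual(
    const double* PhiElemental, // ndofs x nrom basis block, row-major
    const double* rElemental,   // elemental residual, length ndofs
    double* rRomRow,            // output row of the reduced residual, length nrom
    unsigned int ndofs,
    unsigned int nrom)
{
    for (unsigned int m = 0; m < nrom; ++m) {
        double acc = 0.0;
        for (unsigned int d = 0; d < ndofs; ++d)
            acc += PhiElemental[d * nrom + m] * rElemental[d];
        rRomRow[m] = acc; // entry m of trans(Phi_e) * r_e
    }
}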
} // namespace Kratos
#endif // ROM_RESIDUALS_UTILITY_H_INCLUDED defined |
test.c | #define LOL 1337
#include <stdlib.h>
#include <string.h>
int main() {
#pragma omp parallel
 {
 /* Fixed: malloc takes a single size argument, a declaration cannot appear
 in a C if-condition, and the original memset of LOL*LOL bytes overflowed
 the LOL-byte buffer inside an infinite loop. */
 void *lol = malloc(LOL);
 if (lol != NULL) {
 memset(lol, '!', LOL);
 free(lol);
 }
 }
} |
nvptx_asm_delayed_diags.c | // RUN: %clang_cc1 -fopenmp -x c -triple i386-unknown-unknown -fopenmp-targets=nvptx-nvidia-cuda -emit-llvm-bc %s -o %t-x86-host.bc
// RUN: %clang_cc1 -verify -fopenmp -x c -triple nvptx-unknown-unknown -aux-triple i386-unknown-unknown -fopenmp-targets=nvptx-nvidia-cuda %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-x86-host.bc -fsyntax-only -Wuninitialized
// RUN: %clang_cc1 -verify -DDIAGS -DIMMEDIATE -fopenmp -x c -triple nvptx-unknown-unknown -aux-triple i386-unknown-unknown -fopenmp-targets=nvptx-nvidia-cuda %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-x86-host.bc -fsyntax-only -Wuninitialized
// RUN: %clang_cc1 -verify -DDIAGS -DDELAYED -fopenmp -x c -triple nvptx-unknown-unknown -aux-triple i386-unknown-unknown -fopenmp-targets=nvptx-nvidia-cuda %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-x86-host.bc -fsyntax-only -Wuninitialized
// RUN: %clang_cc1 -fopenmp -x c -triple i386-unknown-unknown -fopenmp-targets=nvptx-nvidia-cuda -fopenmp-version=50 -emit-llvm-bc %s -o %t-x86-host.bc
// RUN: %clang_cc1 -verify=expected,omp5 -fopenmp -x c -triple nvptx-unknown-unknown -aux-triple i386-unknown-unknown -fopenmp-targets=nvptx-nvidia-cuda -fopenmp-version=50 %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-x86-host.bc -fsyntax-only -Wuninitialized
// RUN: %clang_cc1 -verify=expected,omp5 -DDIAGS -DOMP5 -DIMMEDIATE -fopenmp -fopenmp-version=50 -x c -triple nvptx-unknown-unknown -aux-triple i386-unknown-unknown -fopenmp-targets=nvptx-nvidia-cuda %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-x86-host.bc -fsyntax-only -Wuninitialized
// RUN: %clang_cc1 -verify=expected,omp5 -DDIAGS -DOMP5 -DDELAYED -fopenmp -fopenmp-version=50 -x c -triple nvptx-unknown-unknown -aux-triple i386-unknown-unknown -fopenmp-targets=nvptx-nvidia-cuda %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-x86-host.bc -fsyntax-only -Wuninitialized
// REQUIRES: x86-registered-target
// REQUIRES: nvptx-registered-target
#ifndef DIAGS
// expected-no-diagnostics
#endif // DIAGS
#ifdef OMP5
void bar(int r) {
#ifdef IMMEDIATE
// omp5-error@+4 {{invalid input constraint 'mx' in asm}}
#endif // IMMEDIATE
__asm__("PR3908 %[lf] %[xx] %[li] %[r]"
: [ r ] "+r"(r)
: [ lf ] "mx"(0), [ li ] "mr"(0), [ xx ] "x"((double)(0)));
}
#ifdef IMMEDIATE
#pragma omp declare target to(bar) device_type(nohost)
#else
#pragma omp declare target to(bar) device_type(host)
#endif // IMMEDIATE
#endif // OMP5
void foo(int r) {
#ifdef IMMEDIATE
// expected-error@+4 {{invalid input constraint 'mx' in asm}}
#endif // IMMEDIATE
__asm__("PR3908 %[lf] %[xx] %[li] %[r]"
: [ r ] "+r"(r)
: [ lf ] "mx"(0), [ li ] "mr"(0), [ xx ] "x"((double)(0)));
}
#ifdef IMMEDIATE
#pragma omp declare target to(foo)
#endif //IMMEDIATE
#ifdef IMMEDIATE
#pragma omp declare target
#endif //IMMEDIATE
void t1(int r) {
#ifdef DIAGS
// expected-error@+4 {{invalid input constraint 'mx' in asm}}
#endif // DIAGS
__asm__("PR3908 %[lf] %[xx] %[li] %[r]"
: [ r ] "+r"(r)
: [ lf ] "mx"(0), [ li ] "mr"(0), [ xx ] "x"((double)(0)));
}
unsigned t2(signed char input) {
unsigned output;
#ifdef DIAGS
// expected-error@+3 {{invalid output constraint '=a' in asm}}
#endif // DIAGS
__asm__("xyz"
: "=a"(output)
: "0"(input));
return output;
}
double t3(double x) {
register long double result;
#ifdef DIAGS
// expected-error@+3 {{invalid output constraint '=t' in asm}}
#endif // DIAGS
__asm __volatile("frndint"
: "=t"(result)
: "0"(x));
return result;
}
unsigned char t4(unsigned char a, unsigned char b) {
unsigned int la = a;
unsigned int lb = b;
unsigned int bigres;
unsigned char res;
#ifdef DIAGS
// expected-error@+3 {{invalid output constraint '=la' in asm}}
#endif // DIAGS
__asm__("0:\n1:\n"
: [ bigres ] "=la"(bigres)
: [ la ] "0"(la), [ lb ] "c"(lb)
: "edx", "cc");
res = bigres;
return res;
}
void t5(void) {
#ifdef DIAGS
// expected-error@+6 {{unknown register name 'st' in asm}}
#endif // DIAGS
__asm__ __volatile__(
"finit"
:
:
: "st", "st(1)", "st(2)", "st(3)",
"st(4)", "st(5)", "st(6)", "st(7)",
"fpsr", "fpcr");
}
typedef long long __m256i __attribute__((__vector_size__(32)));
void t6(__m256i *p) {
#ifdef DIAGS
// expected-error@+3 {{unknown register name 'ymm0' in asm}}
#endif // DIAGS
__asm__ volatile("vmovaps %0, %%ymm0" ::"m"(*(__m256i *)p)
: "ymm0");
}
#ifdef IMMEDIATE
#pragma omp end declare target
#endif //IMMEDIATE
int main() {
#ifdef DELAYED
#pragma omp target
#endif // DELAYED
{
#ifdef DELAYED
// expected-note@+2 {{called by 'main'}}
#endif // DELAYED
t1(0);
#ifdef DELAYED
// expected-note@+2 {{called by 'main'}}
#endif // DELAYED
t2(0);
#ifdef DELAYED
// expected-note@+2 {{called by 'main'}}
#endif // DELAYED
t3(0);
#ifdef DELAYED
// expected-note@+2 {{called by 'main'}}
#endif // DELAYED
t4(0, 0);
#ifdef DELAYED
// expected-note@+2 {{called by 'main'}}
#endif // DELAYED
t5();
#ifdef DELAYED
// expected-note@+2 {{called by 'main'}}
#endif // DELAYED
t6(0);
}
return 0;
}
|
jacobi_cpu_omp_kernel.c | #include <homp.h>
#define REAL float
void jacobi_cpu_omp_wrapper2(omp_offloading_t *off, long n,long m,REAL *u,REAL *uold,long uold_n, long uold_m, int uold_0_offset, int uold_1_offset)
{
int i, j;
int num_omp_threads = off->dev->num_cores;
#pragma omp parallel for private(j,i) shared(m,n,uold,u,uold_0_offset,uold_1_offset, uold_m) num_threads(num_omp_threads)
for (i=0; i < n; i++) {
/* uold has a halo region, so adjust the index to account for the halo offsets */
REAL * tmp_uold = &uold[(i + uold_0_offset) * uold_m + uold_1_offset];
REAL * tmp_u = &u[i*m];
#pragma omp simd
for (j = 0; j < m; j++) {
*tmp_uold = *tmp_u;
tmp_uold ++;
tmp_u++;
}
}
}
void jacobi_cpu_omp_wrapper1(omp_offloading_t *off, long n,long m,REAL omega,REAL ax,REAL ay,REAL b,REAL *u,REAL *f, \
REAL *uold, long uold_m, int uold_0_offset, int uold_1_offset, int i_start, int j_start, REAL *error) {
int num_omp_threads = off->dev->num_cores;
#if CORRECTNESS_CHECK
BEGIN_SERIALIZED_PRINTF(off->devseqid);
printf("udev: dev: %d, %dX%d\n", off->devseqid, n, m);
print_array_dev("udev", off->devseqid, "u",(REAL*)u, n, m);
printf("uolddev: dev: %d, %dX%d\n", off->devseqid, uold_0_length, uold_1_length);
print_array_dev("uolddev", off->devseqid, "uold",(REAL*)uold, uold_0_length, uold_1_length);
printf("i_start: %d, j_start: %d, n: %d, m: %d, uold_0_offset: %d, uold_1_offset: %d\n", i_start, j_start, n, m, uold_0_offset, uold_1_offset);
print_array_dev("f", off->devseqid, "f",(REAL*)f, map_f->map_dim[0], map_f->map_dim[1]);
END_SERIALIZED_PRINTF();
#endif
int i, j;
REAL er = 0.0;
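/* Editor's note: the update below is a standard 5-point Jacobi sweep.
 For each interior point,
   resid = (ax*(uold[i-1][j] + uold[i+1][j])
          + ay*(uold[i][j-1] + uold[i][j+1])
          + b*uold[i][j] - f[i][j]) / b;
   u[i][j] = uold[i][j] - omega*resid;
 The pointer arithmetic walks the halo-padded uold with row stride uold_m,
 so tmp_uold[+uold_m]/tmp_uold[-uold_m] are the i+1/i-1 neighbors and
 tmp_uold[+1]/tmp_uold[-1] the j+1/j-1 neighbors. */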
#pragma omp parallel for private(j,i) reduction(+:er) num_threads(num_omp_threads)
for (i = i_start; i < n; i++) {
REAL * tmp_uold = &uold[(i + uold_0_offset)* uold_m + uold_1_offset+j_start];
REAL * tmp_f = &f[i*m+j_start];
REAL * tmp_u = &u[i*m+j_start];
#pragma omp simd
for (j = j_start; j < m; j++) {
REAL resid = (ax * (tmp_uold[uold_m] + tmp_uold[-uold_m]) + ay * (tmp_uold[-1] + tmp_uold[1]) + b * tmp_uold[0] - *tmp_f)/b;
*tmp_u = *tmp_uold - omega * resid;
er = er + resid * resid;
tmp_uold++;
tmp_f++;
tmp_u++;
}
}
*error = er;
} |
app.c | /**
* Christina Giannoula
* cgiannoula: christina.giann@gmail.com
*/
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <string.h>
#include <dpu.h>
#include <dpu_log.h>
#include <unistd.h>
#include <getopt.h>
#include <assert.h>
#include <math.h>
#include <omp.h>
#include "../support/common.h"
#include "../support/matrix.h"
#include "../support/params.h"
#include "../support/partition.h"
#include "../support/timer.h"
#include "../support/utils.h"
// Define the DPU Binary path as DPU_BINARY here.
#ifndef DPU_BINARY
#define DPU_BINARY "./bin/spmv_dpu"
#endif
#define DPU_CAPACITY (64 << 20) // A DPU's capacity is 64 MiB
/*
* Main Structures:
* 1. Matrices
* 2. Input vector
* 3. Output vector
* 4. Help structures for data partitioning
*/
static struct BDBCOOMatrix* A;
static struct BDBCSRMatrix* B;
static struct BDCSRMatrix* C;
static struct COOMatrix* D;
static val_dt* x;
static val_dt* y;
static val_dt* z;
static struct partition_info_t *part_info;
/**
* @brief Specific information for each DPU
*/
struct dpu_info_t {
uint32_t block_rows_per_dpu;
uint32_t cols_per_dpu;
uint32_t prev_block_rows_dpu;
uint32_t block_start;
uint32_t blocks;
uint32_t blocks_pad;
uint32_t merge;
};
struct dpu_info_t *dpu_info;
/**
 * @brief Compute the number of horizontal partitions of the 2D DPU grid.
 * @param n total number of DPUs to partition
 * @param horz_partitions output: number of DPUs per vertical partition
 * @param vert_partitions number of vertical (column) partitions
 */
void find_partitions(uint32_t n, uint32_t *horz_partitions, uint32_t vert_partitions) {
uint32_t dpus_per_vert_partition = n / vert_partitions;
*horz_partitions = dpus_per_vert_partition;
}
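/* Example: with n = 64 DPUs and vert_partitions = 4, the call yields
 horz_partitions = 16, i.e. a 16x4 grid of DPUs. */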
/**
 * @brief Initialize the input vector with arbitrary data.
 * @param vec pointer to the input vector
 * @param size vector size
 */
void init_vector(val_dt* vec, uint32_t size) {
for(unsigned int i = 0; i < size; ++i) {
vec[i] = (val_dt) (i%4+1);
}
}
/**
 * @brief Compute the output vector on the host CPU (used to verify the DPU results).
 */
void spmv_host(val_dt *y, struct BDBCOOMatrix *bdbcooMtx, val_dt *x) {
uint64_t total_blocks = 0;
for (uint32_t c = 0; c < bdbcooMtx->vert_partitions; c++) {
uint32_t partition = c;
for(uint64_t n=0; n < bdbcooMtx->blocks_per_vert_partition[partition]; n++) {
uint64_t i = bdbcooMtx->bind[total_blocks + n].rowind;
uint64_t j = bdbcooMtx->bind[total_blocks + n].colind;
for(uint64_t blr=0; blr < bdbcooMtx->row_block_size; blr++){
val_dt acc = 0;
for(uint64_t blc=0; blc < bdbcooMtx->col_block_size; blc++) {
acc += bdbcooMtx->bval[total_blocks * bdbcooMtx->row_block_size * bdbcooMtx->col_block_size + n * bdbcooMtx->col_block_size * bdbcooMtx->row_block_size + blr * bdbcooMtx->col_block_size + blc] * x[bdbcooMtx->vert_tile_widths[c] + j * bdbcooMtx->col_block_size + blc];
}
y[i * bdbcooMtx->row_block_size + blr] += acc;
}
}
total_blocks += bdbcooMtx->blocks_per_vert_partition[partition];
}
}
/**
* @brief main of the host application
*/
int main(int argc, char **argv) {
struct Params p = input_params(argc, argv);
struct dpu_set_t dpu_set, dpu;
uint32_t nr_of_dpus;
uint32_t nr_of_ranks;
// Allocate DPUs and load binary
DPU_ASSERT(dpu_alloc(NR_DPUS, NULL, &dpu_set));
DPU_ASSERT(dpu_load(dpu_set, DPU_BINARY, NULL));
DPU_ASSERT(dpu_get_nr_dpus(dpu_set, &nr_of_dpus));
DPU_ASSERT(dpu_get_nr_ranks(dpu_set, &nr_of_ranks));
printf("[INFO] Allocated %d DPU(s)\n", nr_of_dpus);
printf("[INFO] Allocated %d Rank(s)\n", nr_of_ranks);
printf("[INFO] Allocated %d TASKLET(s) per DPU\n", NR_TASKLETS);
unsigned int i;
// Initialize input data
D = readCOOMatrix(p.fileName);
sortCOOMatrix(D);
uint32_t horz_partitions = 0;
uint32_t vert_partitions = p.vert_partitions;
find_partitions(nr_of_dpus, &horz_partitions, p.vert_partitions);
printf("[INFO] %dx%d Matrix Partitioning\n\n", horz_partitions, vert_partitions);
C = coo2bdcsr(D, horz_partitions, vert_partitions);
freeCOOMatrix(D);
B = bdcsr2bdbcsr(C, p.row_blsize, p.col_blsize);
sortBDBCSRMatrix(B);
countNNZperBlockBDBCSRMatrix(B);
freeBDCSRMatrix(C);
A = bdbcsr2bdbcoo(B);
freeBDBCSRMatrix(B);
// Initialize partition data
part_info = partition_init(A, nr_of_dpus, p.max_nranks, NR_TASKLETS);
#if FG_TRANS
struct dpu_set_t rank;
uint32_t each_rank;
DPU_RANK_FOREACH(dpu_set, rank, each_rank){
uint32_t nr_dpus_in_rank;
DPU_ASSERT(dpu_get_nr_dpus(rank, &nr_dpus_in_rank));
part_info->active_dpus_per_rank[each_rank+1] = nr_dpus_in_rank;
}
int sum = 0;
for(int i=0; i < p.max_nranks+1; i++) {
part_info->accum_dpus_ranks[i] = part_info->active_dpus_per_rank[i] + sum;
sum += part_info->active_dpus_per_rank[i];
}
#endif
// Initialize help data - Padding needed
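// (Editor's note) The round-up pattern used below pads a length to the next
// multiple of (8 / byte_dt) elements so every transfer stays 8-byte aligned:
//   n_pad = n + ((8/byte_dt) - n % (8/byte_dt))   when n % (8/byte_dt) != 0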
uint32_t ncols_pad = A->ncols + A->max_tile_width + A->col_block_size;
uint32_t tile_width_pad = A->num_block_cols * A->col_block_size;
uint32_t nrows_pad = A->nrows + A->row_block_size;
if (ncols_pad % (8 / byte_dt) != 0)
ncols_pad = ncols_pad + ((8 / byte_dt) - (ncols_pad % (8 / byte_dt)));
if (tile_width_pad % (8 / byte_dt) != 0)
tile_width_pad = tile_width_pad + ((8 / byte_dt) - (tile_width_pad % (8 / byte_dt)));
#if INT8
if (tile_width_pad % 2 != 0)
tile_width_pad++;
#endif
if (nrows_pad % (8 / byte_dt) != 0)
nrows_pad = nrows_pad + ((8 / byte_dt) - (nrows_pad % (8 / byte_dt)));
// Allocate input vector
x = (val_dt *) malloc(ncols_pad * sizeof(val_dt));
// Allocate output vector
z = (val_dt *) calloc(nrows_pad, sizeof(val_dt));
// Initialize input vector with arbitrary data
init_vector(x, ncols_pad);
// Load-balance blocks across DPUs of the same vertical partition
partition_by_block(A, part_info);
// Initialize help data
dpu_info = (struct dpu_info_t *) malloc(nr_of_dpus * sizeof(struct dpu_info_t));
dpu_arguments_t *input_args = (dpu_arguments_t *) malloc(nr_of_dpus * sizeof(dpu_arguments_t));
// Max limits for parallel transfers
uint64_t max_block_rows_per_dpu = 0;
uint64_t max_blocks_per_dpu = 0;
// Timer for measurements
Timer timer;
i = 0;
uint32_t total_blocks = 0;
DPU_FOREACH(dpu_set, dpu, i) {
// Find padding for block rows and non-zero elements needed for CPU-DPU transfers
uint32_t tile_horz_indx = i % A->horz_partitions;
uint32_t tile_vert_indx = i / A->horz_partitions;
uint32_t block_rows_per_dpu = part_info->brow_split[tile_vert_indx * (2 * A->horz_partitions) + 2 * tile_horz_indx + 1] - part_info->brow_split[tile_vert_indx * (2 * A->horz_partitions) + 2 * tile_horz_indx];
uint32_t prev_block_rows_dpu = part_info->brow_split[tile_vert_indx * (2 * A->horz_partitions) + 2 * tile_horz_indx];
if (block_rows_per_dpu > max_block_rows_per_dpu)
max_block_rows_per_dpu = block_rows_per_dpu;
unsigned int blocks;
blocks = part_info->blocks_dpu[i];
if (blocks > max_blocks_per_dpu)
max_blocks_per_dpu = blocks;
// Keep information per DPU
dpu_info[i].block_rows_per_dpu = block_rows_per_dpu;
dpu_info[i].cols_per_dpu = A->vert_tile_widths[tile_vert_indx+1] - A->vert_tile_widths[tile_vert_indx];
dpu_info[i].prev_block_rows_dpu = prev_block_rows_dpu;
dpu_info[i].blocks = blocks;
// Find input arguments per DPU
input_args[i].block_rows = block_rows_per_dpu;
input_args[i].start_block_row = prev_block_rows_dpu;
input_args[i].tcols = tile_width_pad;
input_args[i].row_block_size = A->row_block_size;
input_args[i].col_block_size = A->col_block_size;
//input_args[i].blocks = blocks;
#if BLNC_TSKLT_BLOCK
// Load-balance blocks across tasklets
partition_tsklt_by_block(A, part_info, i, NR_TASKLETS, nr_of_dpus, total_blocks);
#else
// Load-balance nnzs across tasklets
partition_tsklt_by_nnz(A, part_info, i, NR_TASKLETS, nr_of_dpus, total_blocks);
#endif
uint32_t t;
for (t = 0; t < NR_TASKLETS; t++) {
// Find input arguments per tasklet
input_args[i].start_block[t] = part_info->block_split_tasklet[i * (NR_TASKLETS+2) + t];
input_args[i].blocks_per_tasklet[t] = part_info->block_split_tasklet[i * (NR_TASKLETS+2) + (t+1)] - part_info->block_split_tasklet[i * (NR_TASKLETS+2) + t];
}
total_blocks += part_info->blocks_dpu[i];
}
#if FG_TRANS
// Find max number of block rows (subset of elements of the output vector) among DPUs of each rank
DPU_RANK_FOREACH(dpu_set, rank, each_rank){
uint32_t max_block_rows_cur_rank = 0;
uint32_t max_cols_cur_rank = 0;
uint32_t nr_dpus_in_rank;
DPU_ASSERT(dpu_get_nr_dpus(rank, &nr_dpus_in_rank));
uint32_t start_dpu = part_info->accum_dpus_ranks[each_rank];
for (uint32_t k = 0; k < nr_dpus_in_rank; k++) {
if (start_dpu + k >= nr_of_dpus)
break;
if (dpu_info[start_dpu + k].block_rows_per_dpu > max_block_rows_cur_rank)
max_block_rows_cur_rank = dpu_info[start_dpu + k].block_rows_per_dpu;
if (dpu_info[start_dpu + k].cols_per_dpu > max_cols_cur_rank)
max_cols_cur_rank = dpu_info[start_dpu + k].cols_per_dpu;
}
// Padding
max_cols_cur_rank = ((max_cols_cur_rank + A->col_block_size - 1) / A->col_block_size) * A->col_block_size;
#if INT8
if (max_block_rows_cur_rank % 2 != 0)
max_block_rows_cur_rank++;
#endif
if (max_cols_cur_rank % (8 / byte_dt) != 0)
max_cols_cur_rank = max_cols_cur_rank + ((8 / byte_dt) - (max_cols_cur_rank % (8 / byte_dt)));
part_info->max_block_rows_per_rank[each_rank] = (uint32_t) max_block_rows_cur_rank;
part_info->max_cols_per_rank[each_rank] = (uint32_t) max_cols_cur_rank;
}
#endif
// Initializations for parallel transfers with padding needed
#if INT8
if (max_block_rows_per_dpu % 2 != 0)
max_block_rows_per_dpu++;
#endif
if (max_blocks_per_dpu % 2 != 0)
max_blocks_per_dpu++;
// Re-allocations for padding needed
A->bind = (struct bind_t *) realloc(A->bind, (max_blocks_per_dpu * nr_of_dpus * sizeof(struct bind_t)));
A->bval = (val_dt *) realloc(A->bval, (max_blocks_per_dpu * A->row_block_size * A->col_block_size * nr_of_dpus * sizeof(val_dt)));
y = (val_dt *) calloc((uint64_t) ((uint64_t) nr_of_dpus * (uint64_t) max_block_rows_per_dpu * A->row_block_size), sizeof(val_dt));
// Count the total number of bytes to be transferred to the MRAM of each DPU
unsigned long int total_bytes;
total_bytes = (max_blocks_per_dpu * sizeof(struct bind_t)) + (max_blocks_per_dpu * A->row_block_size * A->col_block_size * sizeof(val_dt)) + (tile_width_pad * sizeof(val_dt)) + (max_block_rows_per_dpu * A->row_block_size * sizeof(val_dt));
assert(total_bytes <= DPU_CAPACITY && "Bytes needed exceeded MRAM size");
// Copy input arguments to DPUs
i = 0;
DPU_FOREACH(dpu_set, dpu, i) {
input_args[i].max_block_rows = max_block_rows_per_dpu;
input_args[i].max_blocks = max_blocks_per_dpu;
DPU_ASSERT(dpu_prepare_xfer(dpu, input_args + i));
}
DPU_ASSERT(dpu_push_xfer(dpu_set, DPU_XFER_TO_DPU, "DPU_INPUT_ARGUMENTS", 0, sizeof(dpu_arguments_t), DPU_XFER_DEFAULT));
// Copy input matrix to DPUs
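// (Editor's note) Per-DPU MRAM layout implied by the offsets passed to
// dpu_push_xfer below, in increasing offset order:
//   [0]            y slice: max_block_rows_per_dpu * row_block_size elements
//   [+y bytes]     x slice: tile_width_pad elements
//   [+y+x bytes]   bind:    max_blocks_per_dpu * sizeof(struct bind_t)
//   [+y+x+bind]    bval:    max_blocks_per_dpu * row_block_size * col_block_size elements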
startTimer(&timer, 0);
// Copy Browind + Bcolind
i = 0;
total_blocks = 0;
DPU_FOREACH(dpu_set, dpu, i) {
DPU_ASSERT(dpu_prepare_xfer(dpu, A->bind + total_blocks));
total_blocks += part_info->blocks_dpu[i];
}
DPU_ASSERT(dpu_push_xfer(dpu_set, DPU_XFER_TO_DPU, DPU_MRAM_HEAP_POINTER_NAME, max_block_rows_per_dpu * A->row_block_size * sizeof(val_dt) + tile_width_pad * sizeof(val_dt), max_blocks_per_dpu * sizeof(struct bind_t), DPU_XFER_DEFAULT));
// Copy Bvalues
i = 0;
total_blocks = 0;
DPU_FOREACH(dpu_set, dpu, i) {
DPU_ASSERT(dpu_prepare_xfer(dpu, A->bval + ((uint64_t) total_blocks * A->row_block_size * A->col_block_size)));
total_blocks += part_info->blocks_dpu[i];
}
DPU_ASSERT(dpu_push_xfer(dpu_set, DPU_XFER_TO_DPU, DPU_MRAM_HEAP_POINTER_NAME, max_block_rows_per_dpu * A->row_block_size * sizeof(val_dt) + tile_width_pad * sizeof(val_dt) + max_blocks_per_dpu * sizeof(struct bind_t), max_blocks_per_dpu * A->row_block_size * A->col_block_size * sizeof(val_dt), DPU_XFER_DEFAULT));
stopTimer(&timer, 0);
// Copy input vector to DPUs
startTimer(&timer, 1);
#if CG_TRANS
// Coarse-grained data transfers in the input vector
i = 0;
DPU_FOREACH(dpu_set, dpu, i) {
uint32_t tile_vert_indx = i / A->horz_partitions;
DPU_ASSERT(dpu_prepare_xfer(dpu, x + A->vert_tile_widths[tile_vert_indx]));
}
DPU_ASSERT(dpu_push_xfer(dpu_set, DPU_XFER_TO_DPU, DPU_MRAM_HEAP_POINTER_NAME, max_block_rows_per_dpu * A->row_block_size * sizeof(val_dt), tile_width_pad * sizeof(val_dt), DPU_XFER_DEFAULT));
#endif
#if FG_TRANS
// Fine-grained data transfers in the input vector at rank granularity
i = 0;
DPU_FOREACH(dpu_set, dpu, i) {
uint32_t tile_vert_indx = i / A->horz_partitions;
DPU_ASSERT(dpu_prepare_xfer(dpu, x + A->vert_tile_widths[tile_vert_indx]));
}
i = 0;
//struct dpu_set_t rank;
DPU_RANK_FOREACH(dpu_set, rank) {
DPU_ASSERT(dpu_push_xfer(rank, DPU_XFER_TO_DPU, DPU_MRAM_HEAP_POINTER_NAME, max_block_rows_per_dpu * A->row_block_size * sizeof(val_dt), part_info->max_cols_per_rank[i] * sizeof(val_dt), DPU_XFER_ASYNC));
i++;
}
DPU_ASSERT(dpu_sync(dpu_set));
#endif
stopTimer(&timer, 1);
// Run kernel on DPUs
startTimer(&timer, 2);
DPU_ASSERT(dpu_launch(dpu_set, DPU_SYNCHRONOUS));
stopTimer(&timer, 2);
#if LOG
// Display DPU Log (default: disabled)
DPU_FOREACH(dpu_set, dpu) {
DPU_ASSERT(dpulog_read_for_dpu(dpu.dpu, stdout));
}
#endif
// Retrieve results for output vector from DPUs
startTimer(&timer, 3);
#if CG_TRANS
// Coarse-grained data transfers in the output vector
i = 0;
DPU_FOREACH(dpu_set, dpu, i) {
DPU_ASSERT(dpu_prepare_xfer(dpu, y + (i * max_block_rows_per_dpu * A->row_block_size)));
}
DPU_ASSERT(dpu_push_xfer(dpu_set, DPU_XFER_FROM_DPU, DPU_MRAM_HEAP_POINTER_NAME, 0, max_block_rows_per_dpu * A->row_block_size * sizeof(val_dt), DPU_XFER_DEFAULT));
#endif
#if FG_TRANS
// Fine-grained data transfers in the output vector at rank granularity
i = 0;
DPU_FOREACH(dpu_set, dpu, i) {
DPU_ASSERT(dpu_prepare_xfer(dpu, y + (i * max_block_rows_per_dpu * A->row_block_size)));
}
i = 0;
DPU_RANK_FOREACH(dpu_set, rank) {
DPU_ASSERT(dpu_push_xfer(rank, DPU_XFER_FROM_DPU, DPU_MRAM_HEAP_POINTER_NAME, 0, part_info->max_block_rows_per_rank[i] * A->row_block_size * sizeof(val_dt), DPU_XFER_ASYNC));
i++;
}
DPU_ASSERT(dpu_sync(dpu_set));
#endif
stopTimer(&timer, 3);
// Merge partial results to the host CPU
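// (Editor's note) DPU (c, r) wrote a dense slice of y starting at
// (c * horz_partitions + r) * max_block_rows_per_dpu * row_block_size;
// the loops below accumulate each slice row into its global block row,
// whose start index is recorded in part_info->brow_split.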
startTimer(&timer, 4);
uint32_t r, c, t, b;
for (c = 0; c < A->vert_partitions; c++) {
for (r = 0; r < A->horz_partitions; r++) {
#pragma omp parallel for num_threads(p.nthreads) shared(A, z, y, max_block_rows_per_dpu, r, c) private(t, b)
for (t = 0; t < part_info->brow_split[c * (2 * A->horz_partitions) + 2 * r+1] - part_info->brow_split[c * (2 * A->horz_partitions) + 2 * r]; t++) {
for (b = 0; b < A->row_block_size; b++) {
z[(part_info->brow_split[c * (2 * A->horz_partitions) + 2 * r] + t) * A->row_block_size + b] += y[(c * A->horz_partitions + r) * max_block_rows_per_dpu * A->row_block_size + t * A->row_block_size + b];
}
}
}
}
stopTimer(&timer, 4);
// Print timing results
printf("\n");
printf("Load Matrix ");
printTimer(&timer, 0);
printf("Load Input Vector ");
printTimer(&timer, 1);
printf("Kernel ");
printTimer(&timer, 2);
printf("Retrieve Output Vector ");
printTimer(&timer, 3);
printf("Merge Partial Results ");
printTimer(&timer, 4);
printf("\n\n");
#if CHECK_CORR
// Check output
startTimer(&timer, 4);
val_dt *y_host = (val_dt *) calloc(nrows_pad, sizeof(val_dt));
spmv_host(y_host, A, x);
bool status = true;
i = 0;
for (i = 0; i < A->nrows; i++) {
if(y_host[i] != z[i]) {
status = false;
}
}
if (status) {
printf("[" ANSI_COLOR_GREEN "OK" ANSI_COLOR_RESET "] Outputs are equal\n");
} else {
printf("[" ANSI_COLOR_RED "ERROR" ANSI_COLOR_RESET "] Outputs differ!\n");
}
free(y_host);
#endif
// Deallocation
freeBDBCOOMatrix(A);
free(x);
free(z);
free(y);
partition_free(part_info);
DPU_ASSERT(dpu_free(dpu_set));
return 0;
}
|
glove_cython.c | /* Generated by Cython 0.29.8 */
/* BEGIN: Cython Metadata
{
"distutils": {
"depends": [],
"extra_compile_args": [
"-fopenmp",
"-ffast-math",
"-march=native"
],
"extra_link_args": [
"-fopenmp"
],
"name": "glove.glove_cython",
"sources": [
"glove/glove_cython.pyx"
]
},
"module_name": "glove.glove_cython"
}
END: Cython Metadata */
#define PY_SSIZE_T_CLEAN
#include "Python.h"
#ifndef Py_PYTHON_H
#error Python headers needed to compile C extensions, please install development version of Python.
#elif PY_VERSION_HEX < 0x02060000 || (0x03000000 <= PY_VERSION_HEX && PY_VERSION_HEX < 0x03030000)
#error Cython requires Python 2.6+ or Python 3.3+.
#else
#define CYTHON_ABI "0_29_8"
#define CYTHON_HEX_VERSION 0x001D08F0
#define CYTHON_FUTURE_DIVISION 0
#include <stddef.h>
#ifndef offsetof
#define offsetof(type, member) ( (size_t) & ((type*)0) -> member )
#endif
#if !defined(WIN32) && !defined(MS_WINDOWS)
#ifndef __stdcall
#define __stdcall
#endif
#ifndef __cdecl
#define __cdecl
#endif
#ifndef __fastcall
#define __fastcall
#endif
#endif
#ifndef DL_IMPORT
#define DL_IMPORT(t) t
#endif
#ifndef DL_EXPORT
#define DL_EXPORT(t) t
#endif
#define __PYX_COMMA ,
#ifndef HAVE_LONG_LONG
#if PY_VERSION_HEX >= 0x02070000
#define HAVE_LONG_LONG
#endif
#endif
#ifndef PY_LONG_LONG
#define PY_LONG_LONG LONG_LONG
#endif
#ifndef Py_HUGE_VAL
#define Py_HUGE_VAL HUGE_VAL
#endif
#ifdef PYPY_VERSION
#define CYTHON_COMPILING_IN_PYPY 1
#define CYTHON_COMPILING_IN_PYSTON 0
#define CYTHON_COMPILING_IN_CPYTHON 0
#undef CYTHON_USE_TYPE_SLOTS
#define CYTHON_USE_TYPE_SLOTS 0
#undef CYTHON_USE_PYTYPE_LOOKUP
#define CYTHON_USE_PYTYPE_LOOKUP 0
#if PY_VERSION_HEX < 0x03050000
#undef CYTHON_USE_ASYNC_SLOTS
#define CYTHON_USE_ASYNC_SLOTS 0
#elif !defined(CYTHON_USE_ASYNC_SLOTS)
#define CYTHON_USE_ASYNC_SLOTS 1
#endif
#undef CYTHON_USE_PYLIST_INTERNALS
#define CYTHON_USE_PYLIST_INTERNALS 0
#undef CYTHON_USE_UNICODE_INTERNALS
#define CYTHON_USE_UNICODE_INTERNALS 0
#undef CYTHON_USE_UNICODE_WRITER
#define CYTHON_USE_UNICODE_WRITER 0
#undef CYTHON_USE_PYLONG_INTERNALS
#define CYTHON_USE_PYLONG_INTERNALS 0
#undef CYTHON_AVOID_BORROWED_REFS
#define CYTHON_AVOID_BORROWED_REFS 1
#undef CYTHON_ASSUME_SAFE_MACROS
#define CYTHON_ASSUME_SAFE_MACROS 0
#undef CYTHON_UNPACK_METHODS
#define CYTHON_UNPACK_METHODS 0
#undef CYTHON_FAST_THREAD_STATE
#define CYTHON_FAST_THREAD_STATE 0
#undef CYTHON_FAST_PYCALL
#define CYTHON_FAST_PYCALL 0
#undef CYTHON_PEP489_MULTI_PHASE_INIT
#define CYTHON_PEP489_MULTI_PHASE_INIT 0
#undef CYTHON_USE_TP_FINALIZE
#define CYTHON_USE_TP_FINALIZE 0
#undef CYTHON_USE_DICT_VERSIONS
#define CYTHON_USE_DICT_VERSIONS 0
#undef CYTHON_USE_EXC_INFO_STACK
#define CYTHON_USE_EXC_INFO_STACK 0
#elif defined(PYSTON_VERSION)
#define CYTHON_COMPILING_IN_PYPY 0
#define CYTHON_COMPILING_IN_PYSTON 1
#define CYTHON_COMPILING_IN_CPYTHON 0
#ifndef CYTHON_USE_TYPE_SLOTS
#define CYTHON_USE_TYPE_SLOTS 1
#endif
#undef CYTHON_USE_PYTYPE_LOOKUP
#define CYTHON_USE_PYTYPE_LOOKUP 0
#undef CYTHON_USE_ASYNC_SLOTS
#define CYTHON_USE_ASYNC_SLOTS 0
#undef CYTHON_USE_PYLIST_INTERNALS
#define CYTHON_USE_PYLIST_INTERNALS 0
#ifndef CYTHON_USE_UNICODE_INTERNALS
#define CYTHON_USE_UNICODE_INTERNALS 1
#endif
#undef CYTHON_USE_UNICODE_WRITER
#define CYTHON_USE_UNICODE_WRITER 0
#undef CYTHON_USE_PYLONG_INTERNALS
#define CYTHON_USE_PYLONG_INTERNALS 0
#ifndef CYTHON_AVOID_BORROWED_REFS
#define CYTHON_AVOID_BORROWED_REFS 0
#endif
#ifndef CYTHON_ASSUME_SAFE_MACROS
#define CYTHON_ASSUME_SAFE_MACROS 1
#endif
#ifndef CYTHON_UNPACK_METHODS
#define CYTHON_UNPACK_METHODS 1
#endif
#undef CYTHON_FAST_THREAD_STATE
#define CYTHON_FAST_THREAD_STATE 0
#undef CYTHON_FAST_PYCALL
#define CYTHON_FAST_PYCALL 0
#undef CYTHON_PEP489_MULTI_PHASE_INIT
#define CYTHON_PEP489_MULTI_PHASE_INIT 0
#undef CYTHON_USE_TP_FINALIZE
#define CYTHON_USE_TP_FINALIZE 0
#undef CYTHON_USE_DICT_VERSIONS
#define CYTHON_USE_DICT_VERSIONS 0
#undef CYTHON_USE_EXC_INFO_STACK
#define CYTHON_USE_EXC_INFO_STACK 0
#else
#define CYTHON_COMPILING_IN_PYPY 0
#define CYTHON_COMPILING_IN_PYSTON 0
#define CYTHON_COMPILING_IN_CPYTHON 1
#ifndef CYTHON_USE_TYPE_SLOTS
#define CYTHON_USE_TYPE_SLOTS 1
#endif
#if PY_VERSION_HEX < 0x02070000
#undef CYTHON_USE_PYTYPE_LOOKUP
#define CYTHON_USE_PYTYPE_LOOKUP 0
#elif !defined(CYTHON_USE_PYTYPE_LOOKUP)
#define CYTHON_USE_PYTYPE_LOOKUP 1
#endif
#if PY_MAJOR_VERSION < 3
#undef CYTHON_USE_ASYNC_SLOTS
#define CYTHON_USE_ASYNC_SLOTS 0
#elif !defined(CYTHON_USE_ASYNC_SLOTS)
#define CYTHON_USE_ASYNC_SLOTS 1
#endif
#if PY_VERSION_HEX < 0x02070000
#undef CYTHON_USE_PYLONG_INTERNALS
#define CYTHON_USE_PYLONG_INTERNALS 0
#elif !defined(CYTHON_USE_PYLONG_INTERNALS)
#define CYTHON_USE_PYLONG_INTERNALS 1
#endif
#ifndef CYTHON_USE_PYLIST_INTERNALS
#define CYTHON_USE_PYLIST_INTERNALS 1
#endif
#ifndef CYTHON_USE_UNICODE_INTERNALS
#define CYTHON_USE_UNICODE_INTERNALS 1
#endif
#if PY_VERSION_HEX < 0x030300F0
#undef CYTHON_USE_UNICODE_WRITER
#define CYTHON_USE_UNICODE_WRITER 0
#elif !defined(CYTHON_USE_UNICODE_WRITER)
#define CYTHON_USE_UNICODE_WRITER 1
#endif
#ifndef CYTHON_AVOID_BORROWED_REFS
#define CYTHON_AVOID_BORROWED_REFS 0
#endif
#ifndef CYTHON_ASSUME_SAFE_MACROS
#define CYTHON_ASSUME_SAFE_MACROS 1
#endif
#ifndef CYTHON_UNPACK_METHODS
#define CYTHON_UNPACK_METHODS 1
#endif
#ifndef CYTHON_FAST_THREAD_STATE
#define CYTHON_FAST_THREAD_STATE 1
#endif
#ifndef CYTHON_FAST_PYCALL
#define CYTHON_FAST_PYCALL 1
#endif
#ifndef CYTHON_PEP489_MULTI_PHASE_INIT
#define CYTHON_PEP489_MULTI_PHASE_INIT (PY_VERSION_HEX >= 0x03050000)
#endif
#ifndef CYTHON_USE_TP_FINALIZE
#define CYTHON_USE_TP_FINALIZE (PY_VERSION_HEX >= 0x030400a1)
#endif
#ifndef CYTHON_USE_DICT_VERSIONS
#define CYTHON_USE_DICT_VERSIONS (PY_VERSION_HEX >= 0x030600B1)
#endif
#ifndef CYTHON_USE_EXC_INFO_STACK
#define CYTHON_USE_EXC_INFO_STACK (PY_VERSION_HEX >= 0x030700A3)
#endif
#endif
#if !defined(CYTHON_FAST_PYCCALL)
#define CYTHON_FAST_PYCCALL (CYTHON_FAST_PYCALL && PY_VERSION_HEX >= 0x030600B1)
#endif
#if CYTHON_USE_PYLONG_INTERNALS
#include "longintrepr.h"
#undef SHIFT
#undef BASE
#undef MASK
#ifdef SIZEOF_VOID_P
enum { __pyx_check_sizeof_voidp = 1 / (int)(SIZEOF_VOID_P == sizeof(void*)) };
#endif
#endif
#ifndef __has_attribute
#define __has_attribute(x) 0
#endif
#ifndef __has_cpp_attribute
#define __has_cpp_attribute(x) 0
#endif
#ifndef CYTHON_RESTRICT
#if defined(__GNUC__)
#define CYTHON_RESTRICT __restrict__
#elif defined(_MSC_VER) && _MSC_VER >= 1400
#define CYTHON_RESTRICT __restrict
#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
#define CYTHON_RESTRICT restrict
#else
#define CYTHON_RESTRICT
#endif
#endif
#ifndef CYTHON_UNUSED
# if defined(__GNUC__)
# if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4))
# define CYTHON_UNUSED __attribute__ ((__unused__))
# else
# define CYTHON_UNUSED
# endif
# elif defined(__ICC) || (defined(__INTEL_COMPILER) && !defined(_MSC_VER))
# define CYTHON_UNUSED __attribute__ ((__unused__))
# else
# define CYTHON_UNUSED
# endif
#endif
#ifndef CYTHON_MAYBE_UNUSED_VAR
# if defined(__cplusplus)
template<class T> void CYTHON_MAYBE_UNUSED_VAR( const T& ) { }
# else
# define CYTHON_MAYBE_UNUSED_VAR(x) (void)(x)
# endif
#endif
#ifndef CYTHON_NCP_UNUSED
# if CYTHON_COMPILING_IN_CPYTHON
# define CYTHON_NCP_UNUSED
# else
# define CYTHON_NCP_UNUSED CYTHON_UNUSED
# endif
#endif
#define __Pyx_void_to_None(void_result) ((void)(void_result), Py_INCREF(Py_None), Py_None)
#ifdef _MSC_VER
#ifndef _MSC_STDINT_H_
#if _MSC_VER < 1300
typedef unsigned char uint8_t;
typedef unsigned int uint32_t;
#else
typedef unsigned __int8 uint8_t;
typedef unsigned __int32 uint32_t;
#endif
#endif
#else
#include <stdint.h>
#endif
#ifndef CYTHON_FALLTHROUGH
#if defined(__cplusplus) && __cplusplus >= 201103L
#if __has_cpp_attribute(fallthrough)
#define CYTHON_FALLTHROUGH [[fallthrough]]
#elif __has_cpp_attribute(clang::fallthrough)
#define CYTHON_FALLTHROUGH [[clang::fallthrough]]
#elif __has_cpp_attribute(gnu::fallthrough)
#define CYTHON_FALLTHROUGH [[gnu::fallthrough]]
#endif
#endif
#ifndef CYTHON_FALLTHROUGH
#if __has_attribute(fallthrough)
#define CYTHON_FALLTHROUGH __attribute__((fallthrough))
#else
#define CYTHON_FALLTHROUGH
#endif
#endif
#if defined(__clang__ ) && defined(__apple_build_version__)
#if __apple_build_version__ < 7000000
#undef CYTHON_FALLTHROUGH
#define CYTHON_FALLTHROUGH
#endif
#endif
#endif
#ifndef CYTHON_INLINE
#if defined(__clang__)
#define CYTHON_INLINE __inline__ __attribute__ ((__unused__))
#elif defined(__GNUC__)
#define CYTHON_INLINE __inline__
#elif defined(_MSC_VER)
#define CYTHON_INLINE __inline
#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
#define CYTHON_INLINE inline
#else
#define CYTHON_INLINE
#endif
#endif
#if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX < 0x02070600 && !defined(Py_OptimizeFlag)
#define Py_OptimizeFlag 0
#endif
#define __PYX_BUILD_PY_SSIZE_T "n"
#define CYTHON_FORMAT_SSIZE_T "z"
#if PY_MAJOR_VERSION < 3
#define __Pyx_BUILTIN_MODULE_NAME "__builtin__"
#define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\
PyCode_New(a+k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)
#define __Pyx_DefaultClassType PyClass_Type
#else
#define __Pyx_BUILTIN_MODULE_NAME "builtins"
#if PY_VERSION_HEX < 0x030800A4
#define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\
PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)
#else
#define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\
PyCode_New(a, 0, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)
#endif
#define __Pyx_DefaultClassType PyType_Type
#endif
#ifndef Py_TPFLAGS_CHECKTYPES
#define Py_TPFLAGS_CHECKTYPES 0
#endif
#ifndef Py_TPFLAGS_HAVE_INDEX
#define Py_TPFLAGS_HAVE_INDEX 0
#endif
#ifndef Py_TPFLAGS_HAVE_NEWBUFFER
#define Py_TPFLAGS_HAVE_NEWBUFFER 0
#endif
#ifndef Py_TPFLAGS_HAVE_FINALIZE
#define Py_TPFLAGS_HAVE_FINALIZE 0
#endif
#ifndef METH_STACKLESS
#define METH_STACKLESS 0
#endif
#if PY_VERSION_HEX <= 0x030700A3 || !defined(METH_FASTCALL)
#ifndef METH_FASTCALL
#define METH_FASTCALL 0x80
#endif
typedef PyObject *(*__Pyx_PyCFunctionFast) (PyObject *self, PyObject *const *args, Py_ssize_t nargs);
typedef PyObject *(*__Pyx_PyCFunctionFastWithKeywords) (PyObject *self, PyObject *const *args,
Py_ssize_t nargs, PyObject *kwnames);
#else
#define __Pyx_PyCFunctionFast _PyCFunctionFast
#define __Pyx_PyCFunctionFastWithKeywords _PyCFunctionFastWithKeywords
#endif
#if CYTHON_FAST_PYCCALL
#define __Pyx_PyFastCFunction_Check(func)\
((PyCFunction_Check(func) && (METH_FASTCALL == (PyCFunction_GET_FLAGS(func) & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_KEYWORDS | METH_STACKLESS)))))
#else
#define __Pyx_PyFastCFunction_Check(func) 0
#endif
#if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Malloc)
#define PyObject_Malloc(s) PyMem_Malloc(s)
#define PyObject_Free(p) PyMem_Free(p)
#define PyObject_Realloc(p) PyMem_Realloc(p)
#endif
#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX < 0x030400A1
#define PyMem_RawMalloc(n) PyMem_Malloc(n)
#define PyMem_RawRealloc(p, n) PyMem_Realloc(p, n)
#define PyMem_RawFree(p) PyMem_Free(p)
#endif
#if CYTHON_COMPILING_IN_PYSTON
#define __Pyx_PyCode_HasFreeVars(co) PyCode_HasFreeVars(co)
#define __Pyx_PyFrame_SetLineNumber(frame, lineno) PyFrame_SetLineNumber(frame, lineno)
#else
#define __Pyx_PyCode_HasFreeVars(co) (PyCode_GetNumFree(co) > 0)
#define __Pyx_PyFrame_SetLineNumber(frame, lineno) (frame)->f_lineno = (lineno)
#endif
#if !CYTHON_FAST_THREAD_STATE || PY_VERSION_HEX < 0x02070000
#define __Pyx_PyThreadState_Current PyThreadState_GET()
#elif PY_VERSION_HEX >= 0x03060000
#define __Pyx_PyThreadState_Current _PyThreadState_UncheckedGet()
#elif PY_VERSION_HEX >= 0x03000000
#define __Pyx_PyThreadState_Current PyThreadState_GET()
#else
#define __Pyx_PyThreadState_Current _PyThreadState_Current
#endif
#if PY_VERSION_HEX < 0x030700A2 && !defined(PyThread_tss_create) && !defined(Py_tss_NEEDS_INIT)
#include "pythread.h"
#define Py_tss_NEEDS_INIT 0
typedef int Py_tss_t;
static CYTHON_INLINE int PyThread_tss_create(Py_tss_t *key) {
*key = PyThread_create_key();
return 0;
}
static CYTHON_INLINE Py_tss_t * PyThread_tss_alloc(void) {
Py_tss_t *key = (Py_tss_t *)PyObject_Malloc(sizeof(Py_tss_t));
*key = Py_tss_NEEDS_INIT;
return key;
}
static CYTHON_INLINE void PyThread_tss_free(Py_tss_t *key) {
PyObject_Free(key);
}
static CYTHON_INLINE int PyThread_tss_is_created(Py_tss_t *key) {
return *key != Py_tss_NEEDS_INIT;
}
static CYTHON_INLINE void PyThread_tss_delete(Py_tss_t *key) {
PyThread_delete_key(*key);
*key = Py_tss_NEEDS_INIT;
}
static CYTHON_INLINE int PyThread_tss_set(Py_tss_t *key, void *value) {
return PyThread_set_key_value(*key, value);
}
static CYTHON_INLINE void * PyThread_tss_get(Py_tss_t *key) {
return PyThread_get_key_value(*key);
}
#endif
#if CYTHON_COMPILING_IN_CPYTHON || defined(_PyDict_NewPresized)
#define __Pyx_PyDict_NewPresized(n) ((n <= 8) ? PyDict_New() : _PyDict_NewPresized(n))
#else
#define __Pyx_PyDict_NewPresized(n) PyDict_New()
#endif
#if PY_MAJOR_VERSION >= 3 || CYTHON_FUTURE_DIVISION
#define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y)
#define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y)
#else
#define __Pyx_PyNumber_Divide(x,y) PyNumber_Divide(x,y)
#define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceDivide(x,y)
#endif
#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1 && CYTHON_USE_UNICODE_INTERNALS
#define __Pyx_PyDict_GetItemStr(dict, name) _PyDict_GetItem_KnownHash(dict, name, ((PyASCIIObject *) name)->hash)
#else
#define __Pyx_PyDict_GetItemStr(dict, name) PyDict_GetItem(dict, name)
#endif
#if PY_VERSION_HEX > 0x03030000 && defined(PyUnicode_KIND)
#define CYTHON_PEP393_ENABLED 1
#define __Pyx_PyUnicode_READY(op) (likely(PyUnicode_IS_READY(op)) ?\
0 : _PyUnicode_Ready((PyObject *)(op)))
#define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_LENGTH(u)
#define __Pyx_PyUnicode_READ_CHAR(u, i) PyUnicode_READ_CHAR(u, i)
#define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) PyUnicode_MAX_CHAR_VALUE(u)
#define __Pyx_PyUnicode_KIND(u) PyUnicode_KIND(u)
#define __Pyx_PyUnicode_DATA(u) PyUnicode_DATA(u)
#define __Pyx_PyUnicode_READ(k, d, i) PyUnicode_READ(k, d, i)
#define __Pyx_PyUnicode_WRITE(k, d, i, ch) PyUnicode_WRITE(k, d, i, ch)
#define __Pyx_PyUnicode_IS_TRUE(u) (0 != (likely(PyUnicode_IS_READY(u)) ? PyUnicode_GET_LENGTH(u) : PyUnicode_GET_SIZE(u)))
#else
#define CYTHON_PEP393_ENABLED 0
#define PyUnicode_1BYTE_KIND 1
#define PyUnicode_2BYTE_KIND 2
#define PyUnicode_4BYTE_KIND 4
#define __Pyx_PyUnicode_READY(op) (0)
#define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_SIZE(u)
#define __Pyx_PyUnicode_READ_CHAR(u, i) ((Py_UCS4)(PyUnicode_AS_UNICODE(u)[i]))
#define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) ((sizeof(Py_UNICODE) == 2) ? 65535 : 1114111)
#define __Pyx_PyUnicode_KIND(u) (sizeof(Py_UNICODE))
#define __Pyx_PyUnicode_DATA(u) ((void*)PyUnicode_AS_UNICODE(u))
#define __Pyx_PyUnicode_READ(k, d, i) ((void)(k), (Py_UCS4)(((Py_UNICODE*)d)[i]))
#define __Pyx_PyUnicode_WRITE(k, d, i, ch) (((void)(k)), ((Py_UNICODE*)d)[i] = ch)
#define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_SIZE(u))
#endif
#if CYTHON_COMPILING_IN_PYPY
#define __Pyx_PyUnicode_Concat(a, b) PyNumber_Add(a, b)
#define __Pyx_PyUnicode_ConcatSafe(a, b) PyNumber_Add(a, b)
#else
#define __Pyx_PyUnicode_Concat(a, b) PyUnicode_Concat(a, b)
#define __Pyx_PyUnicode_ConcatSafe(a, b) ((unlikely((a) == Py_None) || unlikely((b) == Py_None)) ?\
PyNumber_Add(a, b) : __Pyx_PyUnicode_Concat(a, b))
#endif
#if CYTHON_COMPILING_IN_PYPY && !defined(PyUnicode_Contains)
#define PyUnicode_Contains(u, s) PySequence_Contains(u, s)
#endif
#if CYTHON_COMPILING_IN_PYPY && !defined(PyByteArray_Check)
#define PyByteArray_Check(obj) PyObject_TypeCheck(obj, &PyByteArray_Type)
#endif
#if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Format)
#define PyObject_Format(obj, fmt) PyObject_CallMethod(obj, "__format__", "O", fmt)
#endif
#define __Pyx_PyString_FormatSafe(a, b) ((unlikely((a) == Py_None || (PyString_Check(b) && !PyString_CheckExact(b)))) ? PyNumber_Remainder(a, b) : __Pyx_PyString_Format(a, b))
#define __Pyx_PyUnicode_FormatSafe(a, b) ((unlikely((a) == Py_None || (PyUnicode_Check(b) && !PyUnicode_CheckExact(b)))) ? PyNumber_Remainder(a, b) : PyUnicode_Format(a, b))
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyString_Format(a, b) PyUnicode_Format(a, b)
#else
#define __Pyx_PyString_Format(a, b) PyString_Format(a, b)
#endif
#if PY_MAJOR_VERSION < 3 && !defined(PyObject_ASCII)
#define PyObject_ASCII(o) PyObject_Repr(o)
#endif
#if PY_MAJOR_VERSION >= 3
#define PyBaseString_Type PyUnicode_Type
#define PyStringObject PyUnicodeObject
#define PyString_Type PyUnicode_Type
#define PyString_Check PyUnicode_Check
#define PyString_CheckExact PyUnicode_CheckExact
#define PyObject_Unicode PyObject_Str
#endif
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyBaseString_Check(obj) PyUnicode_Check(obj)
#define __Pyx_PyBaseString_CheckExact(obj) PyUnicode_CheckExact(obj)
#else
#define __Pyx_PyBaseString_Check(obj) (PyString_Check(obj) || PyUnicode_Check(obj))
#define __Pyx_PyBaseString_CheckExact(obj) (PyString_CheckExact(obj) || PyUnicode_CheckExact(obj))
#endif
#ifndef PySet_CheckExact
#define PySet_CheckExact(obj) (Py_TYPE(obj) == &PySet_Type)
#endif
#if CYTHON_ASSUME_SAFE_MACROS
#define __Pyx_PySequence_SIZE(seq) Py_SIZE(seq)
#else
#define __Pyx_PySequence_SIZE(seq) PySequence_Size(seq)
#endif
#if PY_MAJOR_VERSION >= 3
#define PyIntObject PyLongObject
#define PyInt_Type PyLong_Type
#define PyInt_Check(op) PyLong_Check(op)
#define PyInt_CheckExact(op) PyLong_CheckExact(op)
#define PyInt_FromString PyLong_FromString
#define PyInt_FromUnicode PyLong_FromUnicode
#define PyInt_FromLong PyLong_FromLong
#define PyInt_FromSize_t PyLong_FromSize_t
#define PyInt_FromSsize_t PyLong_FromSsize_t
#define PyInt_AsLong PyLong_AsLong
#define PyInt_AS_LONG PyLong_AS_LONG
#define PyInt_AsSsize_t PyLong_AsSsize_t
#define PyInt_AsUnsignedLongMask PyLong_AsUnsignedLongMask
#define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask
#define PyNumber_Int PyNumber_Long
#endif
#if PY_MAJOR_VERSION >= 3
#define PyBoolObject PyLongObject
#endif
#if PY_MAJOR_VERSION >= 3 && CYTHON_COMPILING_IN_PYPY
#ifndef PyUnicode_InternFromString
#define PyUnicode_InternFromString(s) PyUnicode_FromString(s)
#endif
#endif
#if PY_VERSION_HEX < 0x030200A4
typedef long Py_hash_t;
#define __Pyx_PyInt_FromHash_t PyInt_FromLong
#define __Pyx_PyInt_AsHash_t PyInt_AsLong
#else
#define __Pyx_PyInt_FromHash_t PyInt_FromSsize_t
#define __Pyx_PyInt_AsHash_t PyInt_AsSsize_t
#endif
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyMethod_New(func, self, klass) ((self) ? PyMethod_New(func, self) : (Py_INCREF(func), func))
#else
#define __Pyx_PyMethod_New(func, self, klass) PyMethod_New(func, self, klass)
#endif
#if CYTHON_USE_ASYNC_SLOTS
#if PY_VERSION_HEX >= 0x030500B1
#define __Pyx_PyAsyncMethodsStruct PyAsyncMethods
#define __Pyx_PyType_AsAsync(obj) (Py_TYPE(obj)->tp_as_async)
#else
#define __Pyx_PyType_AsAsync(obj) ((__Pyx_PyAsyncMethodsStruct*) (Py_TYPE(obj)->tp_reserved))
#endif
#else
#define __Pyx_PyType_AsAsync(obj) NULL
#endif
#ifndef __Pyx_PyAsyncMethodsStruct
typedef struct {
unaryfunc am_await;
unaryfunc am_aiter;
unaryfunc am_anext;
} __Pyx_PyAsyncMethodsStruct;
#endif
#if defined(WIN32) || defined(MS_WINDOWS)
#define _USE_MATH_DEFINES
#endif
#include <math.h>
#ifdef NAN
#define __PYX_NAN() ((float) NAN)
#else
static CYTHON_INLINE float __PYX_NAN() {
float value;
memset(&value, 0xFF, sizeof(value));
return value;
}
#endif
#if defined(__CYGWIN__) && defined(_LDBL_EQ_DBL)
#define __Pyx_truncl trunc
#else
#define __Pyx_truncl truncl
#endif
#define __PYX_ERR(f_index, lineno, Ln_error) \
{ \
__pyx_filename = __pyx_f[f_index]; __pyx_lineno = lineno; __pyx_clineno = __LINE__; goto Ln_error; \
}
#ifndef __PYX_EXTERN_C
#ifdef __cplusplus
#define __PYX_EXTERN_C extern "C"
#else
#define __PYX_EXTERN_C extern
#endif
#endif
#define __PYX_HAVE__glove__glove_cython
#define __PYX_HAVE_API__glove__glove_cython
/* Early includes */
#include "math.h"
#include "pythread.h"
#include <string.h>
#include <stdlib.h>
#include <stdio.h>
#include "pystate.h"
#ifdef _OPENMP
#include <omp.h>
#endif /* _OPENMP */
#if defined(PYREX_WITHOUT_ASSERTIONS) && !defined(CYTHON_WITHOUT_ASSERTIONS)
#define CYTHON_WITHOUT_ASSERTIONS
#endif
typedef struct {PyObject **p; const char *s; const Py_ssize_t n; const char* encoding;
const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry;
#define __PYX_DEFAULT_STRING_ENCODING_IS_ASCII 0
#define __PYX_DEFAULT_STRING_ENCODING_IS_UTF8 0
#define __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT (PY_MAJOR_VERSION >= 3 && __PYX_DEFAULT_STRING_ENCODING_IS_UTF8)
#define __PYX_DEFAULT_STRING_ENCODING ""
#define __Pyx_PyObject_FromString __Pyx_PyBytes_FromString
#define __Pyx_PyObject_FromStringAndSize __Pyx_PyBytes_FromStringAndSize
#define __Pyx_uchar_cast(c) ((unsigned char)c)
#define __Pyx_long_cast(x) ((long)x)
#define __Pyx_fits_Py_ssize_t(v, type, is_signed) (\
(sizeof(type) < sizeof(Py_ssize_t)) ||\
(sizeof(type) > sizeof(Py_ssize_t) &&\
likely(v < (type)PY_SSIZE_T_MAX ||\
v == (type)PY_SSIZE_T_MAX) &&\
(!is_signed || likely(v > (type)PY_SSIZE_T_MIN ||\
v == (type)PY_SSIZE_T_MIN))) ||\
(sizeof(type) == sizeof(Py_ssize_t) &&\
(is_signed || likely(v < (type)PY_SSIZE_T_MAX ||\
v == (type)PY_SSIZE_T_MAX))) )
static CYTHON_INLINE int __Pyx_is_valid_index(Py_ssize_t i, Py_ssize_t limit) {
return (size_t) i < (size_t) limit;
}
#if defined (__cplusplus) && __cplusplus >= 201103L
#include <cstdlib>
#define __Pyx_sst_abs(value) std::abs(value)
#elif SIZEOF_INT >= SIZEOF_SIZE_T
#define __Pyx_sst_abs(value) abs(value)
#elif SIZEOF_LONG >= SIZEOF_SIZE_T
#define __Pyx_sst_abs(value) labs(value)
#elif defined (_MSC_VER)
#define __Pyx_sst_abs(value) ((Py_ssize_t)_abs64(value))
#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
#define __Pyx_sst_abs(value) llabs(value)
#elif defined (__GNUC__)
#define __Pyx_sst_abs(value) __builtin_llabs(value)
#else
#define __Pyx_sst_abs(value) ((value<0) ? -value : value)
#endif
static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject*);
static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject*, Py_ssize_t* length);
#define __Pyx_PyByteArray_FromString(s) PyByteArray_FromStringAndSize((const char*)s, strlen((const char*)s))
#define __Pyx_PyByteArray_FromStringAndSize(s, l) PyByteArray_FromStringAndSize((const char*)s, l)
#define __Pyx_PyBytes_FromString PyBytes_FromString
#define __Pyx_PyBytes_FromStringAndSize PyBytes_FromStringAndSize
static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char*);
#if PY_MAJOR_VERSION < 3
#define __Pyx_PyStr_FromString __Pyx_PyBytes_FromString
#define __Pyx_PyStr_FromStringAndSize __Pyx_PyBytes_FromStringAndSize
#else
#define __Pyx_PyStr_FromString __Pyx_PyUnicode_FromString
#define __Pyx_PyStr_FromStringAndSize __Pyx_PyUnicode_FromStringAndSize
#endif
#define __Pyx_PyBytes_AsWritableString(s) ((char*) PyBytes_AS_STRING(s))
#define __Pyx_PyBytes_AsWritableSString(s) ((signed char*) PyBytes_AS_STRING(s))
#define __Pyx_PyBytes_AsWritableUString(s) ((unsigned char*) PyBytes_AS_STRING(s))
#define __Pyx_PyBytes_AsString(s) ((const char*) PyBytes_AS_STRING(s))
#define __Pyx_PyBytes_AsSString(s) ((const signed char*) PyBytes_AS_STRING(s))
#define __Pyx_PyBytes_AsUString(s) ((const unsigned char*) PyBytes_AS_STRING(s))
#define __Pyx_PyObject_AsWritableString(s) ((char*) __Pyx_PyObject_AsString(s))
#define __Pyx_PyObject_AsWritableSString(s) ((signed char*) __Pyx_PyObject_AsString(s))
#define __Pyx_PyObject_AsWritableUString(s) ((unsigned char*) __Pyx_PyObject_AsString(s))
#define __Pyx_PyObject_AsSString(s) ((const signed char*) __Pyx_PyObject_AsString(s))
#define __Pyx_PyObject_AsUString(s) ((const unsigned char*) __Pyx_PyObject_AsString(s))
#define __Pyx_PyObject_FromCString(s) __Pyx_PyObject_FromString((const char*)s)
#define __Pyx_PyBytes_FromCString(s) __Pyx_PyBytes_FromString((const char*)s)
#define __Pyx_PyByteArray_FromCString(s) __Pyx_PyByteArray_FromString((const char*)s)
#define __Pyx_PyStr_FromCString(s) __Pyx_PyStr_FromString((const char*)s)
#define __Pyx_PyUnicode_FromCString(s) __Pyx_PyUnicode_FromString((const char*)s)
static CYTHON_INLINE size_t __Pyx_Py_UNICODE_strlen(const Py_UNICODE *u) {
const Py_UNICODE *u_end = u;
while (*u_end++) ;
return (size_t)(u_end - u - 1);
}
#define __Pyx_PyUnicode_FromUnicode(u) PyUnicode_FromUnicode(u, __Pyx_Py_UNICODE_strlen(u))
#define __Pyx_PyUnicode_FromUnicodeAndLength PyUnicode_FromUnicode
#define __Pyx_PyUnicode_AsUnicode PyUnicode_AsUnicode
#define __Pyx_NewRef(obj) (Py_INCREF(obj), obj)
#define __Pyx_Owned_Py_None(b) __Pyx_NewRef(Py_None)
static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b);
static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*);
static CYTHON_INLINE int __Pyx_PyObject_IsTrueAndDecref(PyObject*);
static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x);
#define __Pyx_PySequence_Tuple(obj)\
(likely(PyTuple_CheckExact(obj)) ? __Pyx_NewRef(obj) : PySequence_Tuple(obj))
static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*);
static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t);
#if CYTHON_ASSUME_SAFE_MACROS
#define __pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x))
#else
#define __pyx_PyFloat_AsDouble(x) PyFloat_AsDouble(x)
#endif
#define __pyx_PyFloat_AsFloat(x) ((float) __pyx_PyFloat_AsDouble(x))
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyNumber_Int(x) (PyLong_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Long(x))
#else
#define __Pyx_PyNumber_Int(x) (PyInt_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Int(x))
#endif
#define __Pyx_PyNumber_Float(x) (PyFloat_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Float(x))
#if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
static int __Pyx_sys_getdefaultencoding_not_ascii;
static int __Pyx_init_sys_getdefaultencoding_params(void) {
PyObject* sys;
PyObject* default_encoding = NULL;
PyObject* ascii_chars_u = NULL;
PyObject* ascii_chars_b = NULL;
const char* default_encoding_c;
sys = PyImport_ImportModule("sys");
if (!sys) goto bad;
default_encoding = PyObject_CallMethod(sys, (char*) "getdefaultencoding", NULL);
Py_DECREF(sys);
if (!default_encoding) goto bad;
default_encoding_c = PyBytes_AsString(default_encoding);
if (!default_encoding_c) goto bad;
if (strcmp(default_encoding_c, "ascii") == 0) {
__Pyx_sys_getdefaultencoding_not_ascii = 0;
} else {
char ascii_chars[128];
int c;
for (c = 0; c < 128; c++) {
ascii_chars[c] = c;
}
__Pyx_sys_getdefaultencoding_not_ascii = 1;
ascii_chars_u = PyUnicode_DecodeASCII(ascii_chars, 128, NULL);
if (!ascii_chars_u) goto bad;
ascii_chars_b = PyUnicode_AsEncodedString(ascii_chars_u, default_encoding_c, NULL);
if (!ascii_chars_b || !PyBytes_Check(ascii_chars_b) || memcmp(ascii_chars, PyBytes_AS_STRING(ascii_chars_b), 128) != 0) {
PyErr_Format(
PyExc_ValueError,
"This module compiled with c_string_encoding=ascii, but default encoding '%.200s' is not a superset of ascii.",
default_encoding_c);
goto bad;
}
Py_DECREF(ascii_chars_u);
Py_DECREF(ascii_chars_b);
}
Py_DECREF(default_encoding);
return 0;
bad:
Py_XDECREF(default_encoding);
Py_XDECREF(ascii_chars_u);
Py_XDECREF(ascii_chars_b);
return -1;
}
#endif
#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT && PY_MAJOR_VERSION >= 3
#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_DecodeUTF8(c_str, size, NULL)
#else
#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_Decode(c_str, size, __PYX_DEFAULT_STRING_ENCODING, NULL)
#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT
static char* __PYX_DEFAULT_STRING_ENCODING;
static int __Pyx_init_sys_getdefaultencoding_params(void) {
PyObject* sys;
PyObject* default_encoding = NULL;
char* default_encoding_c;
sys = PyImport_ImportModule("sys");
if (!sys) goto bad;
default_encoding = PyObject_CallMethod(sys, (char*) (const char*) "getdefaultencoding", NULL);
Py_DECREF(sys);
if (!default_encoding) goto bad;
default_encoding_c = PyBytes_AsString(default_encoding);
if (!default_encoding_c) goto bad;
__PYX_DEFAULT_STRING_ENCODING = (char*) malloc(strlen(default_encoding_c) + 1);
if (!__PYX_DEFAULT_STRING_ENCODING) goto bad;
strcpy(__PYX_DEFAULT_STRING_ENCODING, default_encoding_c);
Py_DECREF(default_encoding);
return 0;
bad:
Py_XDECREF(default_encoding);
return -1;
}
#endif
#endif
/* Test for GCC > 2.95 */
#if defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)))
#define likely(x) __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
#else /* !__GNUC__ or GCC < 2.95 */
#define likely(x) (x)
#define unlikely(x) (x)
#endif /* __GNUC__ */
static CYTHON_INLINE void __Pyx_pretend_to_initialize(void* ptr) { (void)ptr; }
static PyObject *__pyx_m = NULL;
static PyObject *__pyx_d;
static PyObject *__pyx_b;
static PyObject *__pyx_cython_runtime = NULL;
static PyObject *__pyx_empty_tuple;
static PyObject *__pyx_empty_bytes;
static PyObject *__pyx_empty_unicode;
static int __pyx_lineno;
static int __pyx_clineno = 0;
static const char *__pyx_cfilenm = __FILE__;
static const char *__pyx_filename;
static const char *__pyx_f[] = {
"glove/glove_cython.pyx",
"stringsource",
};
/* NoFastGil.proto */
#define __Pyx_PyGILState_Ensure PyGILState_Ensure
#define __Pyx_PyGILState_Release PyGILState_Release
#define __Pyx_FastGIL_Remember()
#define __Pyx_FastGIL_Forget()
#define __Pyx_FastGilFuncInit()
/* MemviewSliceStruct.proto */
struct __pyx_memoryview_obj;
typedef struct {
struct __pyx_memoryview_obj *memview;
char *data;
Py_ssize_t shape[8];
Py_ssize_t strides[8];
Py_ssize_t suboffsets[8];
} __Pyx_memviewslice;
#define __Pyx_MemoryView_Len(m) (m.shape[0])
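/* Editor's illustrative sketch (assumed values, not part of the module): for
 * a C-contiguous 3x4 double buffer the slice fields would be filled as
 *
 *     __Pyx_memviewslice s;
 *     s.data = (char *) buf;
 *     s.shape[0] = 3;                     s.shape[1] = 4;
 *     s.strides[0] = 4 * sizeof(double);  s.strides[1] = sizeof(double);
 *     s.suboffsets[0] = s.suboffsets[1] = -1;  // negative: direct access
 *
 * The memview member keeps the owning memoryview object (and hence the
 * underlying buffer) alive while the slice is in use; the arrays are sized
 * for at most 8 dimensions. */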
/* Atomics.proto */
#include <pythread.h>
#ifndef CYTHON_ATOMICS
#define CYTHON_ATOMICS 1
#endif
#define __pyx_atomic_int_type int
#if CYTHON_ATOMICS && __GNUC__ >= 4 && (__GNUC_MINOR__ > 1 ||\
                    (__GNUC_MINOR__ == 1 && __GNUC_PATCHLEVEL__ >= 2)) &&\
                    !defined(__i386__)
#define __pyx_atomic_incr_aligned(value, lock) __sync_fetch_and_add(value, 1)
#define __pyx_atomic_decr_aligned(value, lock) __sync_fetch_and_sub(value, 1)
#ifdef __PYX_DEBUG_ATOMICS
#warning "Using GNU atomics"
#endif
#elif CYTHON_ATOMICS && defined(_MSC_VER) && 0  /* trailing '&& 0' keeps this branch disabled */
#include <Windows.h>
#undef __pyx_atomic_int_type
#define __pyx_atomic_int_type LONG
#define __pyx_atomic_incr_aligned(value, lock) InterlockedIncrement(value)
#define __pyx_atomic_decr_aligned(value, lock) InterlockedDecrement(value)
#ifdef __PYX_DEBUG_ATOMICS
#pragma message ("Using MSVC atomics")
#endif
#elif CYTHON_ATOMICS && (defined(__ICC) || defined(__INTEL_COMPILER)) && 0  /* likewise disabled by '&& 0' */
#define __pyx_atomic_incr_aligned(value, lock) _InterlockedIncrement(value)
#define __pyx_atomic_decr_aligned(value, lock) _InterlockedDecrement(value)
#ifdef __PYX_DEBUG_ATOMICS
#warning "Using Intel atomics"
#endif
#else
#undef CYTHON_ATOMICS
#define CYTHON_ATOMICS 0
#ifdef __PYX_DEBUG_ATOMICS
#warning "Not using atomics"
#endif
#endif
typedef volatile __pyx_atomic_int_type __pyx_atomic_int;
#if CYTHON_ATOMICS
#define __pyx_add_acquisition_count(memview)\
__pyx_atomic_incr_aligned(__pyx_get_slice_count_pointer(memview), memview->lock)
#define __pyx_sub_acquisition_count(memview)\
__pyx_atomic_decr_aligned(__pyx_get_slice_count_pointer(memview), memview->lock)
#else
#define __pyx_add_acquisition_count(memview)\
__pyx_add_acquisition_count_locked(__pyx_get_slice_count_pointer(memview), memview->lock)
#define __pyx_sub_acquisition_count(memview)\
__pyx_sub_acquisition_count_locked(__pyx_get_slice_count_pointer(memview), memview->lock)
#endif
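/* Editor's note: each memoryview carries an aligned acquisition counter that
 * slices bump on acquire and drop on release. With GNU atomics the inc/dec
 * are lock-free; otherwise the *_locked fallbacks (declared under
 * MemviewSliceInit.proto below and defined later in this file) serialize on
 * the memoryview's thread lock. Conceptually the locked add is equivalent to
 * this sketch:
 *
 *     static int add_locked(__pyx_atomic_int *count, PyThread_type_lock lock) {
 *         int old;
 *         PyThread_acquire_lock(lock, 1);  // blocking acquire
 *         old = (*count)++;
 *         PyThread_release_lock(lock);
 *         return old;
 *     }
 */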
/* ForceInitThreads.proto */
#ifndef __PYX_FORCE_INIT_THREADS
#define __PYX_FORCE_INIT_THREADS 0
#endif
/* BufferFormatStructs.proto */
#define IS_UNSIGNED(type) (((type) -1) > 0)
struct __Pyx_StructField_;
#define __PYX_BUF_FLAGS_PACKED_STRUCT (1 << 0)
typedef struct {
const char* name;
struct __Pyx_StructField_* fields;
size_t size;
size_t arraysize[8];
int ndim;
char typegroup;
char is_unsigned;
int flags;
} __Pyx_TypeInfo;
typedef struct __Pyx_StructField_ {
__Pyx_TypeInfo* type;
const char* name;
size_t offset;
} __Pyx_StructField;
typedef struct {
__Pyx_StructField* field;
size_t parent_offset;
} __Pyx_BufFmt_StackElem;
typedef struct {
__Pyx_StructField root;
__Pyx_BufFmt_StackElem* head;
size_t fmt_offset;
size_t new_count, enc_count;
size_t struct_alignment;
int is_complex;
char enc_type;
char new_packmode;
char enc_packmode;
char is_valid_array;
} __Pyx_BufFmt_Context;
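/* Editor's note: __Pyx_BufFmt_Context drives the PEP 3118 format-string
 * checker declared further below (__Pyx_BufFmt_CheckString). It walks a
 * buffer's format string, e.g. "d" (double), "=i" (standard-size int) or
 * "T{d:x:d:y:}" (packed struct of two doubles), and matches it against a
 * static __Pyx_TypeInfo such as this module's __Pyx_TypeInfo_double near the
 * end of this section, whose typegroup 'R' marks a floating-point scalar and
 * 'I'/'U' signed/unsigned integers. Hedged usage sketch:
 *
 *     __Pyx_BufFmt_Context ctx;
 *     __Pyx_BufFmt_StackElem stack[1];
 *     __Pyx_BufFmt_Init(&ctx, stack, &__Pyx_TypeInfo_double);
 *     if (!__Pyx_BufFmt_CheckString(&ctx, buf->format)) { ... }  // mismatch
 */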
/*--- Type declarations ---*/
struct __pyx_array_obj;
struct __pyx_MemviewEnum_obj;
struct __pyx_memoryview_obj;
struct __pyx_memoryviewslice_obj;
/* "View.MemoryView":105
*
* @cname("__pyx_array")
* cdef class array: # <<<<<<<<<<<<<<
*
* cdef:
*/
struct __pyx_array_obj {
PyObject_HEAD
struct __pyx_vtabstruct_array *__pyx_vtab;
char *data;
Py_ssize_t len;
char *format;
int ndim;
Py_ssize_t *_shape;
Py_ssize_t *_strides;
Py_ssize_t itemsize;
PyObject *mode;
PyObject *_format;
void (*callback_free_data)(void *);
int free_data;
int dtype_is_object;
};
/* "View.MemoryView":279
*
* @cname('__pyx_MemviewEnum')
* cdef class Enum(object): # <<<<<<<<<<<<<<
* cdef object name
* def __init__(self, name):
*/
struct __pyx_MemviewEnum_obj {
PyObject_HEAD
PyObject *name;
};
/* "View.MemoryView":330
*
* @cname('__pyx_memoryview')
* cdef class memoryview(object): # <<<<<<<<<<<<<<
*
* cdef object obj
*/
struct __pyx_memoryview_obj {
PyObject_HEAD
struct __pyx_vtabstruct_memoryview *__pyx_vtab;
PyObject *obj;
PyObject *_size;
PyObject *_array_interface;
PyThread_type_lock lock;
__pyx_atomic_int acquisition_count[2];
__pyx_atomic_int *acquisition_count_aligned_p;
Py_buffer view;
int flags;
int dtype_is_object;
__Pyx_TypeInfo *typeinfo;
};
/* "View.MemoryView":961
*
* @cname('__pyx_memoryviewslice')
* cdef class _memoryviewslice(memoryview): # <<<<<<<<<<<<<<
* "Internal class for passing memoryview slices to Python"
*
*/
struct __pyx_memoryviewslice_obj {
struct __pyx_memoryview_obj __pyx_base;
__Pyx_memviewslice from_slice;
PyObject *from_object;
PyObject *(*to_object_func)(char *);
int (*to_dtype_func)(char *, PyObject *);
};
/* "View.MemoryView":105
*
* @cname("__pyx_array")
* cdef class array: # <<<<<<<<<<<<<<
*
* cdef:
*/
struct __pyx_vtabstruct_array {
PyObject *(*get_memview)(struct __pyx_array_obj *);
};
static struct __pyx_vtabstruct_array *__pyx_vtabptr_array;
/* "View.MemoryView":330
*
* @cname('__pyx_memoryview')
* cdef class memoryview(object): # <<<<<<<<<<<<<<
*
* cdef object obj
*/
struct __pyx_vtabstruct_memoryview {
char *(*get_item_pointer)(struct __pyx_memoryview_obj *, PyObject *);
PyObject *(*is_slice)(struct __pyx_memoryview_obj *, PyObject *);
PyObject *(*setitem_slice_assignment)(struct __pyx_memoryview_obj *, PyObject *, PyObject *);
PyObject *(*setitem_slice_assign_scalar)(struct __pyx_memoryview_obj *, struct __pyx_memoryview_obj *, PyObject *);
PyObject *(*setitem_indexed)(struct __pyx_memoryview_obj *, PyObject *, PyObject *);
PyObject *(*convert_item_to_object)(struct __pyx_memoryview_obj *, char *);
PyObject *(*assign_item_from_object)(struct __pyx_memoryview_obj *, char *, PyObject *);
};
static struct __pyx_vtabstruct_memoryview *__pyx_vtabptr_memoryview;
/* "View.MemoryView":961
*
* @cname('__pyx_memoryviewslice')
* cdef class _memoryviewslice(memoryview): # <<<<<<<<<<<<<<
* "Internal class for passing memoryview slices to Python"
*
*/
struct __pyx_vtabstruct__memoryviewslice {
struct __pyx_vtabstruct_memoryview __pyx_base;
};
static struct __pyx_vtabstruct__memoryviewslice *__pyx_vtabptr__memoryviewslice;
/* --- Runtime support code (head) --- */
/* Refnanny.proto */
#ifndef CYTHON_REFNANNY
#define CYTHON_REFNANNY 0
#endif
#if CYTHON_REFNANNY
typedef struct {
void (*INCREF)(void*, PyObject*, int);
void (*DECREF)(void*, PyObject*, int);
void (*GOTREF)(void*, PyObject*, int);
void (*GIVEREF)(void*, PyObject*, int);
void* (*SetupContext)(const char*, int, const char*);
void (*FinishContext)(void**);
} __Pyx_RefNannyAPIStruct;
static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL;
static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname);
#define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL;
#ifdef WITH_THREAD
#define __Pyx_RefNannySetupContext(name, acquire_gil)\
if (acquire_gil) {\
PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();\
__pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\
PyGILState_Release(__pyx_gilstate_save);\
} else {\
__pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\
}
#else
#define __Pyx_RefNannySetupContext(name, acquire_gil)\
__pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__)
#endif
#define __Pyx_RefNannyFinishContext()\
__Pyx_RefNanny->FinishContext(&__pyx_refnanny)
#define __Pyx_INCREF(r) __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
#define __Pyx_DECREF(r) __Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
#define __Pyx_GOTREF(r) __Pyx_RefNanny->GOTREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
#define __Pyx_GIVEREF(r) __Pyx_RefNanny->GIVEREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
#define __Pyx_XINCREF(r) do { if((r) != NULL) {__Pyx_INCREF(r); }} while(0)
#define __Pyx_XDECREF(r) do { if((r) != NULL) {__Pyx_DECREF(r); }} while(0)
#define __Pyx_XGOTREF(r) do { if((r) != NULL) {__Pyx_GOTREF(r); }} while(0)
#define __Pyx_XGIVEREF(r) do { if((r) != NULL) {__Pyx_GIVEREF(r);}} while(0)
#else
#define __Pyx_RefNannyDeclarations
#define __Pyx_RefNannySetupContext(name, acquire_gil)
#define __Pyx_RefNannyFinishContext()
#define __Pyx_INCREF(r) Py_INCREF(r)
#define __Pyx_DECREF(r) Py_DECREF(r)
#define __Pyx_GOTREF(r)
#define __Pyx_GIVEREF(r)
#define __Pyx_XINCREF(r) Py_XINCREF(r)
#define __Pyx_XDECREF(r) Py_XDECREF(r)
#define __Pyx_XGOTREF(r)
#define __Pyx_XGIVEREF(r)
#endif
#define __Pyx_XDECREF_SET(r, v) do {\
PyObject *tmp = (PyObject *) r;\
r = v; __Pyx_XDECREF(tmp);\
} while (0)
#define __Pyx_DECREF_SET(r, v) do {\
PyObject *tmp = (PyObject *) r;\
r = v; __Pyx_DECREF(tmp);\
} while (0)
#define __Pyx_CLEAR(r) do { PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);} while(0)
#define __Pyx_XCLEAR(r) do { if((r) != NULL) {PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);}} while(0)
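/* Editor's note: the *_SET and *CLEAR macros swap in the new value before
 * dropping the old reference. Ordering matters because Py_DECREF can run
 * arbitrary destructor code that may re-enter and read the variable;
 * decrementing first would let such code observe a dangling pointer.
 * Illustration:
 *
 *     // unsafe: __Pyx_DECREF(obj); obj = new_ref;
 *     __Pyx_DECREF_SET(obj, new_ref);   // safe: assign, then decref the old
 */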
/* PyObjectGetAttrStr.proto */
#if CYTHON_USE_TYPE_SLOTS
static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name);
#else
#define __Pyx_PyObject_GetAttrStr(o,n) PyObject_GetAttr(o,n)
#endif
/* GetBuiltinName.proto */
static PyObject *__Pyx_GetBuiltinName(PyObject *name);
/* RaiseArgTupleInvalid.proto */
static void __Pyx_RaiseArgtupleInvalid(const char* func_name, int exact,
Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found);
/* RaiseDoubleKeywords.proto */
static void __Pyx_RaiseDoubleKeywordsError(const char* func_name, PyObject* kw_name);
/* ParseKeywords.proto */
static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject **argnames[],\
PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args,\
const char* function_name);
/* MemviewSliceInit.proto */
#define __Pyx_BUF_MAX_NDIMS 8
#define __Pyx_MEMVIEW_DIRECT 1
#define __Pyx_MEMVIEW_PTR 2
#define __Pyx_MEMVIEW_FULL 4
#define __Pyx_MEMVIEW_CONTIG 8
#define __Pyx_MEMVIEW_STRIDED 16
#define __Pyx_MEMVIEW_FOLLOW 32
#define __Pyx_IS_C_CONTIG 1
#define __Pyx_IS_F_CONTIG 2
static int __Pyx_init_memviewslice(
struct __pyx_memoryview_obj *memview,
int ndim,
__Pyx_memviewslice *memviewslice,
int memview_is_new_reference);
static CYTHON_INLINE int __pyx_add_acquisition_count_locked(
__pyx_atomic_int *acquisition_count, PyThread_type_lock lock);
static CYTHON_INLINE int __pyx_sub_acquisition_count_locked(
__pyx_atomic_int *acquisition_count, PyThread_type_lock lock);
#define __pyx_get_slice_count_pointer(memview) (memview->acquisition_count_aligned_p)
#define __pyx_get_slice_count(memview) (*__pyx_get_slice_count_pointer(memview))
#define __PYX_INC_MEMVIEW(slice, have_gil) __Pyx_INC_MEMVIEW(slice, have_gil, __LINE__)
#define __PYX_XDEC_MEMVIEW(slice, have_gil) __Pyx_XDEC_MEMVIEW(slice, have_gil, __LINE__)
static CYTHON_INLINE void __Pyx_INC_MEMVIEW(__Pyx_memviewslice *, int, int);
static CYTHON_INLINE void __Pyx_XDEC_MEMVIEW(__Pyx_memviewslice *, int, int);
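/* Editor's reading of the flags above (an assumption based on the converter
 * names later in this file): each axis of a memoryview slice combines an
 * access kind (DIRECT/PTR/FULL) with a layout (CONTIG/STRIDED/FOLLOW). The
 * converters __Pyx_PyObject_to_MemoryviewSlice_d_dc_double, _dc_double and
 * _dc_int below correspond to the pyx signatures double[:, ::1], double[::1]
 * and int[::1], so a 2-D C-contiguous double slice would plausibly be
 * validated with
 *
 *     int specs[2] = { __Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_FOLLOW,
 *                      __Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_CONTIG };
 *     // passed to __Pyx_ValidateAndInit_memviewslice with __Pyx_IS_C_CONTIG
 */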
/* ArgTypeTest.proto */
#define __Pyx_ArgTypeTest(obj, type, none_allowed, name, exact)\
((likely((Py_TYPE(obj) == type) | (none_allowed && (obj == Py_None)))) ? 1 :\
__Pyx__ArgTypeTest(obj, type, name, exact))
static int __Pyx__ArgTypeTest(PyObject *obj, PyTypeObject *type, const char *name, int exact);
/* PyObjectCall.proto */
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw);
#else
#define __Pyx_PyObject_Call(func, arg, kw) PyObject_Call(func, arg, kw)
#endif
/* PyThreadStateGet.proto */
#if CYTHON_FAST_THREAD_STATE
#define __Pyx_PyThreadState_declare PyThreadState *__pyx_tstate;
#define __Pyx_PyThreadState_assign __pyx_tstate = __Pyx_PyThreadState_Current;
#define __Pyx_PyErr_Occurred() __pyx_tstate->curexc_type
#else
#define __Pyx_PyThreadState_declare
#define __Pyx_PyThreadState_assign
#define __Pyx_PyErr_Occurred() PyErr_Occurred()
#endif
/* PyErrFetchRestore.proto */
#if CYTHON_FAST_THREAD_STATE
#define __Pyx_PyErr_Clear() __Pyx_ErrRestore(NULL, NULL, NULL)
#define __Pyx_ErrRestoreWithState(type, value, tb) __Pyx_ErrRestoreInState(PyThreadState_GET(), type, value, tb)
#define __Pyx_ErrFetchWithState(type, value, tb) __Pyx_ErrFetchInState(PyThreadState_GET(), type, value, tb)
#define __Pyx_ErrRestore(type, value, tb) __Pyx_ErrRestoreInState(__pyx_tstate, type, value, tb)
#define __Pyx_ErrFetch(type, value, tb) __Pyx_ErrFetchInState(__pyx_tstate, type, value, tb)
static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb);
static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb);
#if CYTHON_COMPILING_IN_CPYTHON
#define __Pyx_PyErr_SetNone(exc) (Py_INCREF(exc), __Pyx_ErrRestore((exc), NULL, NULL))
#else
#define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc)
#endif
#else
#define __Pyx_PyErr_Clear() PyErr_Clear()
#define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc)
#define __Pyx_ErrRestoreWithState(type, value, tb) PyErr_Restore(type, value, tb)
#define __Pyx_ErrFetchWithState(type, value, tb) PyErr_Fetch(type, value, tb)
#define __Pyx_ErrRestoreInState(tstate, type, value, tb) PyErr_Restore(type, value, tb)
#define __Pyx_ErrFetchInState(tstate, type, value, tb) PyErr_Fetch(type, value, tb)
#define __Pyx_ErrRestore(type, value, tb) PyErr_Restore(type, value, tb)
#define __Pyx_ErrFetch(type, value, tb) PyErr_Fetch(type, value, tb)
#endif
/* RaiseException.proto */
static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause);
/* PyCFunctionFastCall.proto */
#if CYTHON_FAST_PYCCALL
static CYTHON_INLINE PyObject *__Pyx_PyCFunction_FastCall(PyObject *func, PyObject **args, Py_ssize_t nargs);
#else
#define __Pyx_PyCFunction_FastCall(func, args, nargs) (assert(0), NULL)
#endif
/* PyFunctionFastCall.proto */
#if CYTHON_FAST_PYCALL
#define __Pyx_PyFunction_FastCall(func, args, nargs)\
__Pyx_PyFunction_FastCallDict((func), (args), (nargs), NULL)
#if 1 || PY_VERSION_HEX < 0x030600B1
static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, int nargs, PyObject *kwargs);
#else
#define __Pyx_PyFunction_FastCallDict(func, args, nargs, kwargs) _PyFunction_FastCallDict(func, args, nargs, kwargs)
#endif
#define __Pyx_BUILD_ASSERT_EXPR(cond)\
(sizeof(char [1 - 2*!(cond)]) - 1)
#ifndef Py_MEMBER_SIZE
#define Py_MEMBER_SIZE(type, member) sizeof(((type *)0)->member)
#endif
static size_t __pyx_pyframe_localsplus_offset = 0;
#include "frameobject.h"
#define __Pxy_PyFrame_Initialize_Offsets()\
((void)__Pyx_BUILD_ASSERT_EXPR(sizeof(PyFrameObject) == offsetof(PyFrameObject, f_localsplus) + Py_MEMBER_SIZE(PyFrameObject, f_localsplus)),\
(void)(__pyx_pyframe_localsplus_offset = ((size_t)PyFrame_Type.tp_basicsize) - Py_MEMBER_SIZE(PyFrameObject, f_localsplus)))
#define __Pyx_PyFrame_GetLocalsplus(frame)\
(assert(__pyx_pyframe_localsplus_offset), (PyObject **)(((char *)(frame)) + __pyx_pyframe_localsplus_offset))
#endif
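/* Editor's note: __Pyx_BUILD_ASSERT_EXPR is a C89-style static assert: when
 * cond holds it evaluates to sizeof(char[1]) - 1, i.e. 0; when cond fails the
 * array type char[-1] is ill-formed and compilation stops. For example:
 *
 *     (void)__Pyx_BUILD_ASSERT_EXPR(sizeof(int) >= 2);  // compiles, value 0
 *     (void)__Pyx_BUILD_ASSERT_EXPR(sizeof(int) == 1);  // compile-time error
 *
 * __Pxy_PyFrame_Initialize_Offsets (the "Pxy" spelling is the upstream name)
 * uses it to verify that f_localsplus is the trailing PyFrameObject member
 * before deriving its offset from PyFrame_Type.tp_basicsize. */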
/* PyObjectCall2Args.proto */
static CYTHON_UNUSED PyObject* __Pyx_PyObject_Call2Args(PyObject* function, PyObject* arg1, PyObject* arg2);
/* PyObjectCallMethO.proto */
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg);
#endif
/* PyObjectCallOneArg.proto */
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg);
/* IncludeStringH.proto */
#include <string.h>
/* BytesEquals.proto */
static CYTHON_INLINE int __Pyx_PyBytes_Equals(PyObject* s1, PyObject* s2, int equals);
/* UnicodeEquals.proto */
static CYTHON_INLINE int __Pyx_PyUnicode_Equals(PyObject* s1, PyObject* s2, int equals);
/* StrEquals.proto */
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyString_Equals __Pyx_PyUnicode_Equals
#else
#define __Pyx_PyString_Equals __Pyx_PyBytes_Equals
#endif
/* UnaryNegOverflows.proto */
#define UNARY_NEG_WOULD_OVERFLOW(x)\
(((x) < 0) & ((unsigned long)(x) == 0-(unsigned long)(x)))
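/* Editor's note: in two's complement -LONG_MIN is not representable, and
 * unsigned negation maps exactly that value onto itself, which is what the
 * macro detects. On an LP64 target, with x = LONG_MIN:
 *
 *     (x < 0)                                  -> 1
 *     (unsigned long)x == 0-(unsigned long)x   -> 1   (both equal 2^63)
 *
 * so UNARY_NEG_WOULD_OVERFLOW(x) is 1, while other negative x yield 0. */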
static CYTHON_UNUSED int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/
static PyObject *__pyx_array_get_memview(struct __pyx_array_obj *); /*proto*/
/* GetAttr.proto */
static CYTHON_INLINE PyObject *__Pyx_GetAttr(PyObject *, PyObject *);
/* GetItemInt.proto */
#define __Pyx_GetItemInt(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\
(__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\
__Pyx_GetItemInt_Fast(o, (Py_ssize_t)i, is_list, wraparound, boundscheck) :\
(is_list ? (PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL) :\
__Pyx_GetItemInt_Generic(o, to_py_func(i))))
#define __Pyx_GetItemInt_List(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\
(__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\
__Pyx_GetItemInt_List_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) :\
(PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL))
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i,
int wraparound, int boundscheck);
#define __Pyx_GetItemInt_Tuple(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\
(__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\
__Pyx_GetItemInt_Tuple_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) :\
(PyErr_SetString(PyExc_IndexError, "tuple index out of range"), (PyObject*)NULL))
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i,
int wraparound, int boundscheck);
static PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j);
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i,
int is_list, int wraparound, int boundscheck);
/* ObjectGetItem.proto */
#if CYTHON_USE_TYPE_SLOTS
static CYTHON_INLINE PyObject *__Pyx_PyObject_GetItem(PyObject *obj, PyObject* key);
#else
#define __Pyx_PyObject_GetItem(obj, key) PyObject_GetItem(obj, key)
#endif
/* decode_c_string_utf16.proto */
static CYTHON_INLINE PyObject *__Pyx_PyUnicode_DecodeUTF16(const char *s, Py_ssize_t size, const char *errors) {
int byteorder = 0;
return PyUnicode_DecodeUTF16(s, size, errors, &byteorder);
}
static CYTHON_INLINE PyObject *__Pyx_PyUnicode_DecodeUTF16LE(const char *s, Py_ssize_t size, const char *errors) {
int byteorder = -1;
return PyUnicode_DecodeUTF16(s, size, errors, &byteorder);
}
static CYTHON_INLINE PyObject *__Pyx_PyUnicode_DecodeUTF16BE(const char *s, Py_ssize_t size, const char *errors) {
int byteorder = 1;
return PyUnicode_DecodeUTF16(s, size, errors, &byteorder);
}
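/* Editor's note: the three wrappers pin PyUnicode_DecodeUTF16's byteorder
 * parameter: -1 selects little endian, +1 big endian, and 0 native order
 * with BOM detection (a leading BOM chooses the order and is not copied into
 * the result). A hypothetical call decoding a little-endian buffer:
 *
 *     PyObject *u = __Pyx_PyUnicode_DecodeUTF16LE(buf, nbytes, "strict");
 */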
/* decode_c_string.proto */
static CYTHON_INLINE PyObject* __Pyx_decode_c_string(
const char* cstring, Py_ssize_t start, Py_ssize_t stop,
const char* encoding, const char* errors,
PyObject* (*decode_func)(const char *s, Py_ssize_t size, const char *errors));
/* PyErrExceptionMatches.proto */
#if CYTHON_FAST_THREAD_STATE
#define __Pyx_PyErr_ExceptionMatches(err) __Pyx_PyErr_ExceptionMatchesInState(__pyx_tstate, err)
static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err);
#else
#define __Pyx_PyErr_ExceptionMatches(err) PyErr_ExceptionMatches(err)
#endif
/* GetAttr3.proto */
static CYTHON_INLINE PyObject *__Pyx_GetAttr3(PyObject *, PyObject *, PyObject *);
/* PyDictVersioning.proto */
#if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_TYPE_SLOTS
#define __PYX_DICT_VERSION_INIT ((PY_UINT64_T) -1)
#define __PYX_GET_DICT_VERSION(dict) (((PyDictObject*)(dict))->ma_version_tag)
#define __PYX_UPDATE_DICT_CACHE(dict, value, cache_var, version_var)\
(version_var) = __PYX_GET_DICT_VERSION(dict);\
(cache_var) = (value);
#define __PYX_PY_DICT_LOOKUP_IF_MODIFIED(VAR, DICT, LOOKUP) {\
static PY_UINT64_T __pyx_dict_version = 0;\
static PyObject *__pyx_dict_cached_value = NULL;\
if (likely(__PYX_GET_DICT_VERSION(DICT) == __pyx_dict_version)) {\
(VAR) = __pyx_dict_cached_value;\
} else {\
(VAR) = __pyx_dict_cached_value = (LOOKUP);\
__pyx_dict_version = __PYX_GET_DICT_VERSION(DICT);\
}\
}
static CYTHON_INLINE PY_UINT64_T __Pyx_get_tp_dict_version(PyObject *obj);
static CYTHON_INLINE PY_UINT64_T __Pyx_get_object_dict_version(PyObject *obj);
static CYTHON_INLINE int __Pyx_object_dict_version_matches(PyObject* obj, PY_UINT64_T tp_dict_version, PY_UINT64_T obj_dict_version);
#else
#define __PYX_GET_DICT_VERSION(dict) (0)
#define __PYX_UPDATE_DICT_CACHE(dict, value, cache_var, version_var)
#define __PYX_PY_DICT_LOOKUP_IF_MODIFIED(VAR, DICT, LOOKUP) (VAR) = (LOOKUP);
#endif
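/* Editor's note: CPython 3.6+ bumps a dict's internal ma_version_tag on
 * every mutation, so a module-global lookup can be cached per call site and
 * reused until the tag changes. Hedged usage sketch (the lookup expression
 * is hypothetical):
 *
 *     PyObject *fn;
 *     __PYX_PY_DICT_LOOKUP_IF_MODIFIED(fn, __pyx_d,
 *         PyDict_GetItem(__pyx_d, __pyx_n_s_name))
 *     // LOOKUP re-runs only when __pyx_d was modified since the last call
 */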
/* GetModuleGlobalName.proto */
#if CYTHON_USE_DICT_VERSIONS
#define __Pyx_GetModuleGlobalName(var, name) {\
static PY_UINT64_T __pyx_dict_version = 0;\
static PyObject *__pyx_dict_cached_value = NULL;\
(var) = (likely(__pyx_dict_version == __PYX_GET_DICT_VERSION(__pyx_d))) ?\
(likely(__pyx_dict_cached_value) ? __Pyx_NewRef(__pyx_dict_cached_value) : __Pyx_GetBuiltinName(name)) :\
__Pyx__GetModuleGlobalName(name, &__pyx_dict_version, &__pyx_dict_cached_value);\
}
#define __Pyx_GetModuleGlobalNameUncached(var, name) {\
PY_UINT64_T __pyx_dict_version;\
PyObject *__pyx_dict_cached_value;\
(var) = __Pyx__GetModuleGlobalName(name, &__pyx_dict_version, &__pyx_dict_cached_value);\
}
static PyObject *__Pyx__GetModuleGlobalName(PyObject *name, PY_UINT64_T *dict_version, PyObject **dict_cached_value);
#else
#define __Pyx_GetModuleGlobalName(var, name) (var) = __Pyx__GetModuleGlobalName(name)
#define __Pyx_GetModuleGlobalNameUncached(var, name) (var) = __Pyx__GetModuleGlobalName(name)
static CYTHON_INLINE PyObject *__Pyx__GetModuleGlobalName(PyObject *name);
#endif
/* RaiseTooManyValuesToUnpack.proto */
static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected);
/* RaiseNeedMoreValuesToUnpack.proto */
static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index);
/* RaiseNoneIterError.proto */
static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void);
/* ExtTypeTest.proto */
static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type);
/* GetTopmostException.proto */
#if CYTHON_USE_EXC_INFO_STACK
static _PyErr_StackItem * __Pyx_PyErr_GetTopmostException(PyThreadState *tstate);
#endif
/* SaveResetException.proto */
#if CYTHON_FAST_THREAD_STATE
#define __Pyx_ExceptionSave(type, value, tb) __Pyx__ExceptionSave(__pyx_tstate, type, value, tb)
static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb);
#define __Pyx_ExceptionReset(type, value, tb) __Pyx__ExceptionReset(__pyx_tstate, type, value, tb)
static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb);
#else
#define __Pyx_ExceptionSave(type, value, tb) PyErr_GetExcInfo(type, value, tb)
#define __Pyx_ExceptionReset(type, value, tb) PyErr_SetExcInfo(type, value, tb)
#endif
/* GetException.proto */
#if CYTHON_FAST_THREAD_STATE
#define __Pyx_GetException(type, value, tb) __Pyx__GetException(__pyx_tstate, type, value, tb)
static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb);
#else
static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb);
#endif
/* SwapException.proto */
#if CYTHON_FAST_THREAD_STATE
#define __Pyx_ExceptionSwap(type, value, tb) __Pyx__ExceptionSwap(__pyx_tstate, type, value, tb)
static CYTHON_INLINE void __Pyx__ExceptionSwap(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb);
#else
static CYTHON_INLINE void __Pyx_ExceptionSwap(PyObject **type, PyObject **value, PyObject **tb);
#endif
/* Import.proto */
static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level);
/* FastTypeChecks.proto */
#if CYTHON_COMPILING_IN_CPYTHON
#define __Pyx_TypeCheck(obj, type) __Pyx_IsSubtype(Py_TYPE(obj), (PyTypeObject *)type)
static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b);
static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject *type);
static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *type1, PyObject *type2);
#else
#define __Pyx_TypeCheck(obj, type) PyObject_TypeCheck(obj, (PyTypeObject *)type)
#define __Pyx_PyErr_GivenExceptionMatches(err, type) PyErr_GivenExceptionMatches(err, type)
#define __Pyx_PyErr_GivenExceptionMatches2(err, type1, type2) (PyErr_GivenExceptionMatches(err, type1) || PyErr_GivenExceptionMatches(err, type2))
#endif
#define __Pyx_PyException_Check(obj) __Pyx_TypeCheck(obj, PyExc_Exception)
static CYTHON_UNUSED int __pyx_memoryview_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/
/* ListCompAppend.proto */
#if CYTHON_USE_PYLIST_INTERNALS && CYTHON_ASSUME_SAFE_MACROS
static CYTHON_INLINE int __Pyx_ListComp_Append(PyObject* list, PyObject* x) {
    PyListObject* L = (PyListObject*) list;
    Py_ssize_t len = Py_SIZE(list);
    if (likely(L->allocated > len)) {
        /* Fast path: spare capacity exists, so take a reference and store
           straight into the preallocated slot, then bump ob_size (Py_SIZE
           is assignable on the CPython versions this file targets). */
        Py_INCREF(x);
        PyList_SET_ITEM(list, len, x);
        Py_SIZE(list) = len+1;
        return 0;
    }
    return PyList_Append(list, x);  /* Slow path: let CPython resize. */
}
#else
#define __Pyx_ListComp_Append(L,x) PyList_Append(L,x)
#endif
/* PyIntBinop.proto */
#if !CYTHON_COMPILING_IN_PYPY
static PyObject* __Pyx_PyInt_AddObjC(PyObject *op1, PyObject *op2, long intval, int inplace, int zerodivision_check);
#else
#define __Pyx_PyInt_AddObjC(op1, op2, intval, inplace, zerodivision_check)\
(inplace ? PyNumber_InPlaceAdd(op1, op2) : PyNumber_Add(op1, op2))
#endif
/* ListExtend.proto */
static CYTHON_INLINE int __Pyx_PyList_Extend(PyObject* L, PyObject* v) {
#if CYTHON_COMPILING_IN_CPYTHON
PyObject* none = _PyList_Extend((PyListObject*)L, v);
if (unlikely(!none))
return -1;
Py_DECREF(none);
return 0;
#else
return PyList_SetSlice(L, PY_SSIZE_T_MAX, PY_SSIZE_T_MAX, v);
#endif
}
/* ListAppend.proto */
#if CYTHON_USE_PYLIST_INTERNALS && CYTHON_ASSUME_SAFE_MACROS
static CYTHON_INLINE int __Pyx_PyList_Append(PyObject* list, PyObject* x) {
PyListObject* L = (PyListObject*) list;
Py_ssize_t len = Py_SIZE(list);
    /* Bitwise '&' rather than '&&': both tests are cheap, so evaluating
       them branch-free is preferred here. */
    if (likely(L->allocated > len) & likely(len > (L->allocated >> 1))) {
Py_INCREF(x);
PyList_SET_ITEM(list, len, x);
Py_SIZE(list) = len+1;
return 0;
}
return PyList_Append(list, x);
}
#else
#define __Pyx_PyList_Append(L,x) PyList_Append(L,x)
#endif
/* None.proto */
static CYTHON_INLINE void __Pyx_RaiseUnboundLocalError(const char *varname);
/* WriteUnraisableException.proto */
static void __Pyx_WriteUnraisable(const char *name, int clineno,
int lineno, const char *filename,
int full_traceback, int nogil);
/* ImportFrom.proto */
static PyObject* __Pyx_ImportFrom(PyObject* module, PyObject* name);
/* HasAttr.proto */
static CYTHON_INLINE int __Pyx_HasAttr(PyObject *, PyObject *);
/* PyObject_GenericGetAttrNoDict.proto */
#if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000
static CYTHON_INLINE PyObject* __Pyx_PyObject_GenericGetAttrNoDict(PyObject* obj, PyObject* attr_name);
#else
#define __Pyx_PyObject_GenericGetAttrNoDict PyObject_GenericGetAttr
#endif
/* PyObject_GenericGetAttr.proto */
#if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000
static PyObject* __Pyx_PyObject_GenericGetAttr(PyObject* obj, PyObject* attr_name);
#else
#define __Pyx_PyObject_GenericGetAttr PyObject_GenericGetAttr
#endif
/* SetVTable.proto */
static int __Pyx_SetVtable(PyObject *dict, void *vtable);
/* SetupReduce.proto */
static int __Pyx_setup_reduce(PyObject* type_obj);
/* CLineInTraceback.proto */
#ifdef CYTHON_CLINE_IN_TRACEBACK
#define __Pyx_CLineForTraceback(tstate, c_line) (((CYTHON_CLINE_IN_TRACEBACK)) ? c_line : 0)
#else
static int __Pyx_CLineForTraceback(PyThreadState *tstate, int c_line);
#endif
/* CodeObjectCache.proto */
typedef struct {
PyCodeObject* code_object;
int code_line;
} __Pyx_CodeObjectCacheEntry;
struct __Pyx_CodeObjectCache {
int count;
int max_count;
__Pyx_CodeObjectCacheEntry* entries;
};
static struct __Pyx_CodeObjectCache __pyx_code_cache = {0,0,NULL};
static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line);
static PyCodeObject *__pyx_find_code_object(int code_line);
static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object);
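/* Editor's note: the code-object cache maps generated-C line numbers back to
 * Python code objects so tracebacks can point at glove_cython.pyx locations.
 * Entries stay sorted by code_line, hence the bisection helper. A sketch of
 * how AddTraceback-style code consumes it:
 *
 *     PyCodeObject *co = __pyx_find_code_object(c_line);
 *     if (co == NULL) {
 *         co = ...;                                // build a new code object
 *         __pyx_insert_code_object(c_line, co);    // cache for next lookup
 *     }
 */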
/* AddTraceback.proto */
static void __Pyx_AddTraceback(const char *funcname, int c_line,
int py_line, const char *filename);
#if PY_MAJOR_VERSION < 3
static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags);
static void __Pyx_ReleaseBuffer(Py_buffer *view);
#else
#define __Pyx_GetBuffer PyObject_GetBuffer
#define __Pyx_ReleaseBuffer PyBuffer_Release
#endif
/* BufferStructDeclare.proto */
typedef struct {
Py_ssize_t shape, strides, suboffsets;
} __Pyx_Buf_DimInfo;
typedef struct {
size_t refcount;
Py_buffer pybuffer;
} __Pyx_Buffer;
typedef struct {
__Pyx_Buffer *rcbuffer;
char *data;
__Pyx_Buf_DimInfo diminfo[8];
} __Pyx_LocalBuf_ND;
/* MemviewSliceIsContig.proto */
static int __pyx_memviewslice_is_contig(const __Pyx_memviewslice mvs, char order, int ndim);
/* OverlappingSlices.proto */
static int __pyx_slices_overlap(__Pyx_memviewslice *slice1,
__Pyx_memviewslice *slice2,
int ndim, size_t itemsize);
/* Capsule.proto */
static CYTHON_INLINE PyObject *__pyx_capsule_create(void *p, const char *sig);
/* IsLittleEndian.proto */
static CYTHON_INLINE int __Pyx_Is_Little_Endian(void);
/* BufferFormatCheck.proto */
static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts);
static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx,
__Pyx_BufFmt_StackElem* stack,
__Pyx_TypeInfo* type);
/* TypeInfoCompare.proto */
static int __pyx_typeinfo_cmp(__Pyx_TypeInfo *a, __Pyx_TypeInfo *b);
/* MemviewSliceValidateAndInit.proto */
static int __Pyx_ValidateAndInit_memviewslice(
int *axes_specs,
int c_or_f_flag,
int buf_flags,
int ndim,
__Pyx_TypeInfo *dtype,
__Pyx_BufFmt_StackElem stack[],
__Pyx_memviewslice *memviewslice,
PyObject *original_obj);
/* ObjectToMemviewSlice.proto */
static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_d_dc_double(PyObject *, int writable_flag);
/* ObjectToMemviewSlice.proto */
static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_dc_double(PyObject *, int writable_flag);
/* ObjectToMemviewSlice.proto */
static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_dc_int(PyObject *, int writable_flag);
/* CIntToPy.proto */
static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value);
/* MemviewSliceCopyTemplate.proto */
static __Pyx_memviewslice
__pyx_memoryview_copy_new_contig(const __Pyx_memviewslice *from_mvs,
const char *mode, int ndim,
size_t sizeof_dtype, int contig_flag,
int dtype_is_object);
/* CIntFromPy.proto */
static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *);
/* CIntFromPy.proto */
static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *);
/* CIntToPy.proto */
static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value);
/* CIntFromPy.proto */
static CYTHON_INLINE char __Pyx_PyInt_As_char(PyObject *);
/* CheckBinaryVersion.proto */
static int __Pyx_check_binary_version(void);
/* InitStrings.proto */
static int __Pyx_InitStrings(__Pyx_StringTabEntry *t);
static PyObject *__pyx_array_get_memview(struct __pyx_array_obj *__pyx_v_self); /* proto*/
static char *__pyx_memoryview_get_item_pointer(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index); /* proto*/
static PyObject *__pyx_memoryview_is_slice(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj); /* proto*/
static PyObject *__pyx_memoryview_setitem_slice_assignment(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_dst, PyObject *__pyx_v_src); /* proto*/
static PyObject *__pyx_memoryview_setitem_slice_assign_scalar(struct __pyx_memoryview_obj *__pyx_v_self, struct __pyx_memoryview_obj *__pyx_v_dst, PyObject *__pyx_v_value); /* proto*/
static PyObject *__pyx_memoryview_setitem_indexed(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value); /* proto*/
static PyObject *__pyx_memoryview_convert_item_to_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp); /* proto*/
static PyObject *__pyx_memoryview_assign_item_from_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value); /* proto*/
static PyObject *__pyx_memoryviewslice_convert_item_to_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp); /* proto*/
static PyObject *__pyx_memoryviewslice_assign_item_from_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value); /* proto*/
/* Module declarations from 'glove.glove_cython' */
static PyTypeObject *__pyx_array_type = 0;
static PyTypeObject *__pyx_MemviewEnum_type = 0;
static PyTypeObject *__pyx_memoryview_type = 0;
static PyTypeObject *__pyx_memoryviewslice_type = 0;
static PyObject *generic = 0;
static PyObject *strided = 0;
static PyObject *indirect = 0;
static PyObject *contiguous = 0;
static PyObject *indirect_contiguous = 0;
static int __pyx_memoryview_thread_locks_used;
static PyThread_type_lock __pyx_memoryview_thread_locks[8];
static CYTHON_INLINE double __pyx_f_5glove_12glove_cython_double_min(double, double); /*proto*/
static struct __pyx_array_obj *__pyx_array_new(PyObject *, Py_ssize_t, char *, char *, char *); /*proto*/
static void *__pyx_align_pointer(void *, size_t); /*proto*/
static PyObject *__pyx_memoryview_new(PyObject *, int, int, __Pyx_TypeInfo *); /*proto*/
static CYTHON_INLINE int __pyx_memoryview_check(PyObject *); /*proto*/
static PyObject *_unellipsify(PyObject *, int); /*proto*/
static PyObject *assert_direct_dimensions(Py_ssize_t *, int); /*proto*/
static struct __pyx_memoryview_obj *__pyx_memview_slice(struct __pyx_memoryview_obj *, PyObject *); /*proto*/
static int __pyx_memoryview_slice_memviewslice(__Pyx_memviewslice *, Py_ssize_t, Py_ssize_t, Py_ssize_t, int, int, int *, Py_ssize_t, Py_ssize_t, Py_ssize_t, int, int, int, int); /*proto*/
static char *__pyx_pybuffer_index(Py_buffer *, char *, Py_ssize_t, Py_ssize_t); /*proto*/
static int __pyx_memslice_transpose(__Pyx_memviewslice *); /*proto*/
static PyObject *__pyx_memoryview_fromslice(__Pyx_memviewslice, int, PyObject *(*)(char *), int (*)(char *, PyObject *), int); /*proto*/
static __Pyx_memviewslice *__pyx_memoryview_get_slice_from_memoryview(struct __pyx_memoryview_obj *, __Pyx_memviewslice *); /*proto*/
static void __pyx_memoryview_slice_copy(struct __pyx_memoryview_obj *, __Pyx_memviewslice *); /*proto*/
static PyObject *__pyx_memoryview_copy_object(struct __pyx_memoryview_obj *); /*proto*/
static PyObject *__pyx_memoryview_copy_object_from_slice(struct __pyx_memoryview_obj *, __Pyx_memviewslice *); /*proto*/
static Py_ssize_t abs_py_ssize_t(Py_ssize_t); /*proto*/
static char __pyx_get_best_slice_order(__Pyx_memviewslice *, int); /*proto*/
static void _copy_strided_to_strided(char *, Py_ssize_t *, char *, Py_ssize_t *, Py_ssize_t *, Py_ssize_t *, int, size_t); /*proto*/
static void copy_strided_to_strided(__Pyx_memviewslice *, __Pyx_memviewslice *, int, size_t); /*proto*/
static Py_ssize_t __pyx_memoryview_slice_get_size(__Pyx_memviewslice *, int); /*proto*/
static Py_ssize_t __pyx_fill_contig_strides_array(Py_ssize_t *, Py_ssize_t *, Py_ssize_t, int, char); /*proto*/
static void *__pyx_memoryview_copy_data_to_temp(__Pyx_memviewslice *, __Pyx_memviewslice *, char, int); /*proto*/
static int __pyx_memoryview_err_extents(int, Py_ssize_t, Py_ssize_t); /*proto*/
static int __pyx_memoryview_err_dim(PyObject *, char *, int); /*proto*/
static int __pyx_memoryview_err(PyObject *, char *); /*proto*/
static int __pyx_memoryview_copy_contents(__Pyx_memviewslice, __Pyx_memviewslice, int, int, int); /*proto*/
static void __pyx_memoryview_broadcast_leading(__Pyx_memviewslice *, int, int); /*proto*/
static void __pyx_memoryview_refcount_copying(__Pyx_memviewslice *, int, int, int); /*proto*/
static void __pyx_memoryview_refcount_objects_in_slice_with_gil(char *, Py_ssize_t *, Py_ssize_t *, int, int); /*proto*/
static void __pyx_memoryview_refcount_objects_in_slice(char *, Py_ssize_t *, Py_ssize_t *, int, int); /*proto*/
static void __pyx_memoryview_slice_assign_scalar(__Pyx_memviewslice *, int, size_t, void *, int); /*proto*/
static void __pyx_memoryview__slice_assign_scalar(char *, Py_ssize_t *, Py_ssize_t *, int, size_t, void *); /*proto*/
static PyObject *__pyx_unpickle_Enum__set_state(struct __pyx_MemviewEnum_obj *, PyObject *); /*proto*/
static __Pyx_TypeInfo __Pyx_TypeInfo_double = { "double", NULL, sizeof(double), { 0 }, 0, 'R', 0, 0 };
static __Pyx_TypeInfo __Pyx_TypeInfo_int = { "int", NULL, sizeof(int), { 0 }, 0, IS_UNSIGNED(int) ? 'U' : 'I', IS_UNSIGNED(int), 0 };
#define __Pyx_MODULE_NAME "glove.glove_cython"
extern int __pyx_module_is_main_glove__glove_cython;
int __pyx_module_is_main_glove__glove_cython = 0;
/* Implementation of 'glove.glove_cython' */
static PyObject *__pyx_builtin_range;
static PyObject *__pyx_builtin_ValueError;
static PyObject *__pyx_builtin_MemoryError;
static PyObject *__pyx_builtin_enumerate;
static PyObject *__pyx_builtin_TypeError;
static PyObject *__pyx_builtin_Ellipsis;
static PyObject *__pyx_builtin_id;
static PyObject *__pyx_builtin_IndexError;
static const char __pyx_k_O[] = "O";
static const char __pyx_k_c[] = "c";
static const char __pyx_k_i[] = "i";
static const char __pyx_k_j[] = "j";
static const char __pyx_k_id[] = "id";
static const char __pyx_k_np[] = "np";
static const char __pyx_k_sp[] = "sp";
static const char __pyx_k__19[] = "*";
static const char __pyx_k_col[] = "col";
static const char __pyx_k_dim[] = "dim";
static const char __pyx_k_new[] = "__new__";
static const char __pyx_k_obj[] = "obj";
static const char __pyx_k_row[] = "row";
static const char __pyx_k_base[] = "base";
static const char __pyx_k_dict[] = "__dict__";
static const char __pyx_k_loss[] = "loss";
static const char __pyx_k_main[] = "__main__";
static const char __pyx_k_mode[] = "mode";
static const char __pyx_k_name[] = "name";
static const char __pyx_k_ndim[] = "ndim";
static const char __pyx_k_pack[] = "pack";
static const char __pyx_k_size[] = "size";
static const char __pyx_k_step[] = "step";
static const char __pyx_k_stop[] = "stop";
static const char __pyx_k_test[] = "__test__";
static const char __pyx_k_ASCII[] = "ASCII";
static const char __pyx_k_alpha[] = "alpha";
static const char __pyx_k_class[] = "__class__";
static const char __pyx_k_count[] = "count";
static const char __pyx_k_error[] = "error";
static const char __pyx_k_flags[] = "flags";
static const char __pyx_k_numpy[] = "numpy";
static const char __pyx_k_range[] = "range";
static const char __pyx_k_shape[] = "shape";
static const char __pyx_k_start[] = "start";
static const char __pyx_k_counts[] = "counts";
static const char __pyx_k_encode[] = "encode";
static const char __pyx_k_format[] = "format";
static const char __pyx_k_import[] = "__import__";
static const char __pyx_k_name_2[] = "__name__";
static const char __pyx_k_pickle[] = "pickle";
static const char __pyx_k_reduce[] = "__reduce__";
static const char __pyx_k_struct[] = "struct";
static const char __pyx_k_unpack[] = "unpack";
static const char __pyx_k_update[] = "update";
static const char __pyx_k_word_a[] = "word_a";
static const char __pyx_k_word_b[] = "word_b";
static const char __pyx_k_fortran[] = "fortran";
static const char __pyx_k_memview[] = "memview";
static const char __pyx_k_wordvec[] = "wordvec";
static const char __pyx_k_Ellipsis[] = "Ellipsis";
static const char __pyx_k_getstate[] = "__getstate__";
static const char __pyx_k_gradient[] = "gradient";
static const char __pyx_k_itemsize[] = "itemsize";
static const char __pyx_k_max_loss[] = "max_loss";
static const char __pyx_k_pyx_type[] = "__pyx_type";
static const char __pyx_k_setstate[] = "__setstate__";
static const char __pyx_k_wordbias[] = "wordbias";
static const char __pyx_k_TypeError[] = "TypeError";
static const char __pyx_k_enumerate[] = "enumerate";
static const char __pyx_k_max_count[] = "max_count";
static const char __pyx_k_pyx_state[] = "__pyx_state";
static const char __pyx_k_reduce_ex[] = "__reduce_ex__";
static const char __pyx_k_IndexError[] = "IndexError";
static const char __pyx_k_ValueError[] = "ValueError";
static const char __pyx_k_no_threads[] = "no_threads";
static const char __pyx_k_prediction[] = "prediction";
static const char __pyx_k_pyx_result[] = "__pyx_result";
static const char __pyx_k_pyx_vtable[] = "__pyx_vtable__";
static const char __pyx_k_MemoryError[] = "MemoryError";
static const char __pyx_k_PickleError[] = "PickleError";
static const char __pyx_k_collections[] = "collections";
static const char __pyx_k_fit_vectors[] = "fit_vectors";
static const char __pyx_k_entry_weight[] = "entry_weight";
static const char __pyx_k_pyx_checksum[] = "__pyx_checksum";
static const char __pyx_k_scipy_sparse[] = "scipy.sparse";
static const char __pyx_k_stringsource[] = "stringsource";
static const char __pyx_k_learning_rate[] = "learning_rate";
static const char __pyx_k_pyx_getbuffer[] = "__pyx_getbuffer";
static const char __pyx_k_reduce_cython[] = "__reduce_cython__";
static const char __pyx_k_shuffle_index[] = "shuffle_index";
static const char __pyx_k_View_MemoryView[] = "View.MemoryView";
static const char __pyx_k_allocate_buffer[] = "allocate_buffer";
static const char __pyx_k_dtype_is_object[] = "dtype_is_object";
static const char __pyx_k_pyx_PickleError[] = "__pyx_PickleError";
static const char __pyx_k_setstate_cython[] = "__setstate_cython__";
static const char __pyx_k_shuffle_indices[] = "shuffle_indices";
static const char __pyx_k_no_cooccurrences[] = "no_cooccurrences";
static const char __pyx_k_pyx_unpickle_Enum[] = "__pyx_unpickle_Enum";
static const char __pyx_k_cline_in_traceback[] = "cline_in_traceback";
static const char __pyx_k_glove_glove_cython[] = "glove.glove_cython";
static const char __pyx_k_strided_and_direct[] = "<strided and direct>";
static const char __pyx_k_strided_and_indirect[] = "<strided and indirect>";
static const char __pyx_k_contiguous_and_direct[] = "<contiguous and direct>";
static const char __pyx_k_initial_learning_rate[] = "initial_learning_rate";
static const char __pyx_k_wordvec_sum_gradients[] = "wordvec_sum_gradients";
static const char __pyx_k_MemoryView_of_r_object[] = "<MemoryView of %r object>";
static const char __pyx_k_glove_glove_cython_pyx[] = "glove/glove_cython.pyx";
static const char __pyx_k_wordbias_sum_gradients[] = "wordbias_sum_gradients";
static const char __pyx_k_MemoryView_of_r_at_0x_x[] = "<MemoryView of %r at 0x%x>";
static const char __pyx_k_contiguous_and_indirect[] = "<contiguous and indirect>";
static const char __pyx_k_Cannot_index_with_type_s[] = "Cannot index with type '%s'";
static const char __pyx_k_Invalid_shape_in_axis_d_d[] = "Invalid shape in axis %d: %d.";
static const char __pyx_k_itemsize_0_for_cython_array[] = "itemsize <= 0 for cython.array";
static const char __pyx_k_unable_to_allocate_array_data[] = "unable to allocate array data.";
static const char __pyx_k_strided_and_direct_or_indirect[] = "<strided and direct or indirect>";
static const char __pyx_k_Buffer_view_does_not_expose_stri[] = "Buffer view does not expose strides";
static const char __pyx_k_Can_only_create_a_buffer_that_is[] = "Can only create a buffer that is contiguous in memory.";
static const char __pyx_k_Cannot_assign_to_read_only_memor[] = "Cannot assign to read-only memoryview";
static const char __pyx_k_Cannot_create_writable_memory_vi[] = "Cannot create writable memory view from read-only memoryview";
static const char __pyx_k_Empty_shape_tuple_for_cython_arr[] = "Empty shape tuple for cython.array";
static const char __pyx_k_Incompatible_checksums_s_vs_0xb0[] = "Incompatible checksums (%s vs 0xb068931 = (name))";
static const char __pyx_k_Indirect_dimensions_not_supporte[] = "Indirect dimensions not supported";
static const char __pyx_k_Invalid_mode_expected_c_or_fortr[] = "Invalid mode, expected 'c' or 'fortran', got %s";
static const char __pyx_k_Out_of_bounds_on_buffer_access_a[] = "Out of bounds on buffer access (axis %d)";
static const char __pyx_k_Unable_to_convert_item_to_object[] = "Unable to convert item to object";
static const char __pyx_k_got_differing_extents_in_dimensi[] = "got differing extents in dimension %d (got %d and %d)";
static const char __pyx_k_no_default___reduce___due_to_non[] = "no default __reduce__ due to non-trivial __cinit__";
static const char __pyx_k_unable_to_allocate_shape_and_str[] = "unable to allocate shape and strides.";
static PyObject *__pyx_n_s_ASCII;
static PyObject *__pyx_kp_s_Buffer_view_does_not_expose_stri;
static PyObject *__pyx_kp_s_Can_only_create_a_buffer_that_is;
static PyObject *__pyx_kp_s_Cannot_assign_to_read_only_memor;
static PyObject *__pyx_kp_s_Cannot_create_writable_memory_vi;
static PyObject *__pyx_kp_s_Cannot_index_with_type_s;
static PyObject *__pyx_n_s_Ellipsis;
static PyObject *__pyx_kp_s_Empty_shape_tuple_for_cython_arr;
static PyObject *__pyx_kp_s_Incompatible_checksums_s_vs_0xb0;
static PyObject *__pyx_n_s_IndexError;
static PyObject *__pyx_kp_s_Indirect_dimensions_not_supporte;
static PyObject *__pyx_kp_s_Invalid_mode_expected_c_or_fortr;
static PyObject *__pyx_kp_s_Invalid_shape_in_axis_d_d;
static PyObject *__pyx_n_s_MemoryError;
static PyObject *__pyx_kp_s_MemoryView_of_r_at_0x_x;
static PyObject *__pyx_kp_s_MemoryView_of_r_object;
static PyObject *__pyx_n_b_O;
static PyObject *__pyx_kp_s_Out_of_bounds_on_buffer_access_a;
static PyObject *__pyx_n_s_PickleError;
static PyObject *__pyx_n_s_TypeError;
static PyObject *__pyx_kp_s_Unable_to_convert_item_to_object;
static PyObject *__pyx_n_s_ValueError;
static PyObject *__pyx_n_s_View_MemoryView;
static PyObject *__pyx_n_s__19;
static PyObject *__pyx_n_s_allocate_buffer;
static PyObject *__pyx_n_s_alpha;
static PyObject *__pyx_n_s_base;
static PyObject *__pyx_n_s_c;
static PyObject *__pyx_n_u_c;
static PyObject *__pyx_n_s_class;
static PyObject *__pyx_n_s_cline_in_traceback;
static PyObject *__pyx_n_s_col;
static PyObject *__pyx_n_s_collections;
static PyObject *__pyx_kp_s_contiguous_and_direct;
static PyObject *__pyx_kp_s_contiguous_and_indirect;
static PyObject *__pyx_n_s_count;
static PyObject *__pyx_n_s_counts;
static PyObject *__pyx_n_s_dict;
static PyObject *__pyx_n_s_dim;
static PyObject *__pyx_n_s_dtype_is_object;
static PyObject *__pyx_n_s_encode;
static PyObject *__pyx_n_s_entry_weight;
static PyObject *__pyx_n_s_enumerate;
static PyObject *__pyx_n_s_error;
static PyObject *__pyx_n_s_fit_vectors;
static PyObject *__pyx_n_s_flags;
static PyObject *__pyx_n_s_format;
static PyObject *__pyx_n_s_fortran;
static PyObject *__pyx_n_u_fortran;
static PyObject *__pyx_n_s_getstate;
static PyObject *__pyx_n_s_glove_glove_cython;
static PyObject *__pyx_kp_s_glove_glove_cython_pyx;
static PyObject *__pyx_kp_s_got_differing_extents_in_dimensi;
static PyObject *__pyx_n_s_gradient;
static PyObject *__pyx_n_s_i;
static PyObject *__pyx_n_s_id;
static PyObject *__pyx_n_s_import;
static PyObject *__pyx_n_s_initial_learning_rate;
static PyObject *__pyx_n_s_itemsize;
static PyObject *__pyx_kp_s_itemsize_0_for_cython_array;
static PyObject *__pyx_n_s_j;
static PyObject *__pyx_n_s_learning_rate;
static PyObject *__pyx_n_s_loss;
static PyObject *__pyx_n_s_main;
static PyObject *__pyx_n_s_max_count;
static PyObject *__pyx_n_s_max_loss;
static PyObject *__pyx_n_s_memview;
static PyObject *__pyx_n_s_mode;
static PyObject *__pyx_n_s_name;
static PyObject *__pyx_n_s_name_2;
static PyObject *__pyx_n_s_ndim;
static PyObject *__pyx_n_s_new;
static PyObject *__pyx_n_s_no_cooccurrences;
static PyObject *__pyx_kp_s_no_default___reduce___due_to_non;
static PyObject *__pyx_n_s_no_threads;
static PyObject *__pyx_n_s_np;
static PyObject *__pyx_n_s_numpy;
static PyObject *__pyx_n_s_obj;
static PyObject *__pyx_n_s_pack;
static PyObject *__pyx_n_s_pickle;
static PyObject *__pyx_n_s_prediction;
static PyObject *__pyx_n_s_pyx_PickleError;
static PyObject *__pyx_n_s_pyx_checksum;
static PyObject *__pyx_n_s_pyx_getbuffer;
static PyObject *__pyx_n_s_pyx_result;
static PyObject *__pyx_n_s_pyx_state;
static PyObject *__pyx_n_s_pyx_type;
static PyObject *__pyx_n_s_pyx_unpickle_Enum;
static PyObject *__pyx_n_s_pyx_vtable;
static PyObject *__pyx_n_s_range;
static PyObject *__pyx_n_s_reduce;
static PyObject *__pyx_n_s_reduce_cython;
static PyObject *__pyx_n_s_reduce_ex;
static PyObject *__pyx_n_s_row;
static PyObject *__pyx_n_s_scipy_sparse;
static PyObject *__pyx_n_s_setstate;
static PyObject *__pyx_n_s_setstate_cython;
static PyObject *__pyx_n_s_shape;
static PyObject *__pyx_n_s_shuffle_index;
static PyObject *__pyx_n_s_shuffle_indices;
static PyObject *__pyx_n_s_size;
static PyObject *__pyx_n_s_sp;
static PyObject *__pyx_n_s_start;
static PyObject *__pyx_n_s_step;
static PyObject *__pyx_n_s_stop;
static PyObject *__pyx_kp_s_strided_and_direct;
static PyObject *__pyx_kp_s_strided_and_direct_or_indirect;
static PyObject *__pyx_kp_s_strided_and_indirect;
static PyObject *__pyx_kp_s_stringsource;
static PyObject *__pyx_n_s_struct;
static PyObject *__pyx_n_s_test;
static PyObject *__pyx_kp_s_unable_to_allocate_array_data;
static PyObject *__pyx_kp_s_unable_to_allocate_shape_and_str;
static PyObject *__pyx_n_s_unpack;
static PyObject *__pyx_n_s_update;
static PyObject *__pyx_n_s_word_a;
static PyObject *__pyx_n_s_word_b;
static PyObject *__pyx_n_s_wordbias;
static PyObject *__pyx_n_s_wordbias_sum_gradients;
static PyObject *__pyx_n_s_wordvec;
static PyObject *__pyx_n_s_wordvec_sum_gradients;
static PyObject *__pyx_pf_5glove_12glove_cython_fit_vectors(CYTHON_UNUSED PyObject *__pyx_self, __Pyx_memviewslice __pyx_v_wordvec, __Pyx_memviewslice __pyx_v_wordvec_sum_gradients, __Pyx_memviewslice __pyx_v_wordbias, __Pyx_memviewslice __pyx_v_wordbias_sum_gradients, __Pyx_memviewslice __pyx_v_row, __Pyx_memviewslice __pyx_v_col, __Pyx_memviewslice __pyx_v_counts, __Pyx_memviewslice __pyx_v_shuffle_indices, double __pyx_v_initial_learning_rate, double __pyx_v_max_count, double __pyx_v_alpha, double __pyx_v_max_loss, CYTHON_UNUSED int __pyx_v_no_threads); /* proto */
static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array___cinit__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_shape, Py_ssize_t __pyx_v_itemsize, PyObject *__pyx_v_format, PyObject *__pyx_v_mode, int __pyx_v_allocate_buffer); /* proto */
static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_2__getbuffer__(struct __pyx_array_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */
static void __pyx_array___pyx_pf_15View_dot_MemoryView_5array_4__dealloc__(struct __pyx_array_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_5array_7memview___get__(struct __pyx_array_obj *__pyx_v_self); /* proto */
static Py_ssize_t __pyx_array___pyx_pf_15View_dot_MemoryView_5array_6__len__(struct __pyx_array_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_8__getattr__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_attr); /* proto */
static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_10__getitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item); /* proto */
static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_12__setitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value); /* proto */
static PyObject *__pyx_pf___pyx_array___reduce_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf___pyx_array_2__setstate_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */
static int __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum___init__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v_name); /* proto */
static PyObject *__pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum_2__repr__(struct __pyx_MemviewEnum_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf___pyx_MemviewEnum___reduce_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf___pyx_MemviewEnum_2__setstate_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v___pyx_state); /* proto */
static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview___cinit__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj, int __pyx_v_flags, int __pyx_v_dtype_is_object); /* proto */
static void __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_2__dealloc__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_4__getitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index); /* proto */
static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_6__setitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value); /* proto */
static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_8__getbuffer__(struct __pyx_memoryview_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_1T___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4base___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_5shape___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_7strides___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_10suboffsets___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4ndim___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_8itemsize___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_6nbytes___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4size___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static Py_ssize_t __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_10__len__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_12__repr__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_14__str__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_16is_c_contig(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_18is_f_contig(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_20copy(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_22copy_fortran(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf___pyx_memoryview___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf___pyx_memoryview_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */
static void __pyx_memoryviewslice___pyx_pf_15View_dot_MemoryView_16_memoryviewslice___dealloc__(struct __pyx_memoryviewslice_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_16_memoryviewslice_4base___get__(struct __pyx_memoryviewslice_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf___pyx_memoryviewslice___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf___pyx_memoryviewslice_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView___pyx_unpickle_Enum(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v___pyx_type, long __pyx_v___pyx_checksum, PyObject *__pyx_v___pyx_state); /* proto */
static PyObject *__pyx_tp_new_array(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new_Enum(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new_memoryview(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new__memoryviewslice(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_int_0;
static PyObject *__pyx_int_1;
static PyObject *__pyx_int_184977713;
static PyObject *__pyx_int_neg_1;
static PyObject *__pyx_tuple_;
static PyObject *__pyx_tuple__2;
static PyObject *__pyx_tuple__3;
static PyObject *__pyx_tuple__4;
static PyObject *__pyx_tuple__5;
static PyObject *__pyx_tuple__6;
static PyObject *__pyx_tuple__7;
static PyObject *__pyx_tuple__8;
static PyObject *__pyx_tuple__9;
static PyObject *__pyx_slice__15;
static PyObject *__pyx_tuple__10;
static PyObject *__pyx_tuple__11;
static PyObject *__pyx_tuple__12;
static PyObject *__pyx_tuple__13;
static PyObject *__pyx_tuple__14;
static PyObject *__pyx_tuple__16;
static PyObject *__pyx_tuple__17;
static PyObject *__pyx_tuple__18;
static PyObject *__pyx_tuple__20;
static PyObject *__pyx_tuple__22;
static PyObject *__pyx_tuple__23;
static PyObject *__pyx_tuple__24;
static PyObject *__pyx_tuple__25;
static PyObject *__pyx_tuple__26;
static PyObject *__pyx_tuple__27;
static PyObject *__pyx_codeobj__21;
static PyObject *__pyx_codeobj__28;
/* Late includes */
/* "glove/glove_cython.pyx":10
*
*
* cdef inline double double_min(double a, double b) nogil: return a if a <= b else b # <<<<<<<<<<<<<<
* cdef inline int int_min(int a, int b) nogil: return a if a <= b else b
* cdef inline int int_max(int a, int b) nogil: return a if a > b else b
*/
static CYTHON_INLINE double __pyx_f_5glove_12glove_cython_double_min(double __pyx_v_a, double __pyx_v_b) {
double __pyx_r;
double __pyx_t_1;
if (((__pyx_v_a <= __pyx_v_b) != 0)) {
__pyx_t_1 = __pyx_v_a;
} else {
__pyx_t_1 = __pyx_v_b;
}
__pyx_r = __pyx_t_1;
goto __pyx_L0;
/* function exit code */
__pyx_L0:;
return __pyx_r;
}
/* "glove/glove_cython.pyx":11
*
* cdef inline double double_min(double a, double b) nogil: return a if a <= b else b
* cdef inline int int_min(int a, int b) nogil: return a if a <= b else b # <<<<<<<<<<<<<<
* cdef inline int int_max(int a, int b) nogil: return a if a > b else b
*
*/
static CYTHON_INLINE int __pyx_f_5glove_12glove_cython_int_min(int __pyx_v_a, int __pyx_v_b) {
int __pyx_r;
int __pyx_t_1;
if (((__pyx_v_a <= __pyx_v_b) != 0)) {
__pyx_t_1 = __pyx_v_a;
} else {
__pyx_t_1 = __pyx_v_b;
}
__pyx_r = __pyx_t_1;
goto __pyx_L0;
/* function exit code */
__pyx_L0:;
return __pyx_r;
}
/* "glove/glove_cython.pyx":12
* cdef inline double double_min(double a, double b) nogil: return a if a <= b else b
* cdef inline int int_min(int a, int b) nogil: return a if a <= b else b
* cdef inline int int_max(int a, int b) nogil: return a if a > b else b # <<<<<<<<<<<<<<
*
*
*/
static CYTHON_INLINE int __pyx_f_5glove_12glove_cython_int_max(int __pyx_v_a, int __pyx_v_b) {
int __pyx_r;
int __pyx_t_1;
if (((__pyx_v_a > __pyx_v_b) != 0)) {
__pyx_t_1 = __pyx_v_a;
} else {
__pyx_t_1 = __pyx_v_b;
}
__pyx_r = __pyx_t_1;
goto __pyx_L0;
/* function exit code */
__pyx_L0:;
return __pyx_r;
}
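/* The `(... != 0)` wrappers in the comparisons above are Cython's
 * normalization of C comparison results to its bint type; they add no
 * runtime cost after optimization. */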
/* "glove/glove_cython.pyx":20
*
*
* def fit_vectors(double[:, ::1] wordvec, # <<<<<<<<<<<<<<
* double[:, ::1] wordvec_sum_gradients,
* double[::1] wordbias,
*/
/* Python wrapper */
static PyObject *__pyx_pw_5glove_12glove_cython_1fit_vectors(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static char __pyx_doc_5glove_12glove_cython_fit_vectors[] = "\n Estimate GloVe word embeddings given the cooccurrence matrix.\n Modifies the word vector and word bias array in-place.\n\n Training is performed via asynchronous stochastic gradient descent,\n using the AdaGrad per-coordinate learning rate.\n ";
static PyMethodDef __pyx_mdef_5glove_12glove_cython_1fit_vectors = {"fit_vectors", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_5glove_12glove_cython_1fit_vectors, METH_VARARGS|METH_KEYWORDS, __pyx_doc_5glove_12glove_cython_fit_vectors};
static PyObject *__pyx_pw_5glove_12glove_cython_1fit_vectors(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
__Pyx_memviewslice __pyx_v_wordvec = { 0, 0, { 0 }, { 0 }, { 0 } };
__Pyx_memviewslice __pyx_v_wordvec_sum_gradients = { 0, 0, { 0 }, { 0 }, { 0 } };
__Pyx_memviewslice __pyx_v_wordbias = { 0, 0, { 0 }, { 0 }, { 0 } };
__Pyx_memviewslice __pyx_v_wordbias_sum_gradients = { 0, 0, { 0 }, { 0 }, { 0 } };
__Pyx_memviewslice __pyx_v_row = { 0, 0, { 0 }, { 0 }, { 0 } };
__Pyx_memviewslice __pyx_v_col = { 0, 0, { 0 }, { 0 }, { 0 } };
__Pyx_memviewslice __pyx_v_counts = { 0, 0, { 0 }, { 0 }, { 0 } };
__Pyx_memviewslice __pyx_v_shuffle_indices = { 0, 0, { 0 }, { 0 }, { 0 } };
double __pyx_v_initial_learning_rate;
double __pyx_v_max_count;
double __pyx_v_alpha;
double __pyx_v_max_loss;
CYTHON_UNUSED int __pyx_v_no_threads;
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("fit_vectors (wrapper)", 0);
{
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_wordvec,&__pyx_n_s_wordvec_sum_gradients,&__pyx_n_s_wordbias,&__pyx_n_s_wordbias_sum_gradients,&__pyx_n_s_row,&__pyx_n_s_col,&__pyx_n_s_counts,&__pyx_n_s_shuffle_indices,&__pyx_n_s_initial_learning_rate,&__pyx_n_s_max_count,&__pyx_n_s_alpha,&__pyx_n_s_max_loss,&__pyx_n_s_no_threads,0};
PyObject* values[13] = {0,0,0,0,0,0,0,0,0,0,0,0,0};
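    /* values[] collects the arguments in declaration order; the switch
       below cascades (via CYTHON_FALLTHROUGH) from the highest positional
       count down, filling one slot per case, and the keyword lookups then
       fill any slots the positional arguments left empty. */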
if (unlikely(__pyx_kwds)) {
Py_ssize_t kw_args;
const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
switch (pos_args) {
case 13: values[12] = PyTuple_GET_ITEM(__pyx_args, 12);
CYTHON_FALLTHROUGH;
case 12: values[11] = PyTuple_GET_ITEM(__pyx_args, 11);
CYTHON_FALLTHROUGH;
case 11: values[10] = PyTuple_GET_ITEM(__pyx_args, 10);
CYTHON_FALLTHROUGH;
case 10: values[9] = PyTuple_GET_ITEM(__pyx_args, 9);
CYTHON_FALLTHROUGH;
case 9: values[8] = PyTuple_GET_ITEM(__pyx_args, 8);
CYTHON_FALLTHROUGH;
case 8: values[7] = PyTuple_GET_ITEM(__pyx_args, 7);
CYTHON_FALLTHROUGH;
case 7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6);
CYTHON_FALLTHROUGH;
case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5);
CYTHON_FALLTHROUGH;
case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4);
CYTHON_FALLTHROUGH;
case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
CYTHON_FALLTHROUGH;
case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
CYTHON_FALLTHROUGH;
case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
CYTHON_FALLTHROUGH;
case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
CYTHON_FALLTHROUGH;
case 0: break;
default: goto __pyx_L5_argtuple_error;
}
kw_args = PyDict_Size(__pyx_kwds);
switch (pos_args) {
case 0:
if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_wordvec)) != 0)) kw_args--;
else goto __pyx_L5_argtuple_error;
CYTHON_FALLTHROUGH;
case 1:
if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_wordvec_sum_gradients)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("fit_vectors", 1, 13, 13, 1); __PYX_ERR(0, 20, __pyx_L3_error)
}
CYTHON_FALLTHROUGH;
case 2:
if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_wordbias)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("fit_vectors", 1, 13, 13, 2); __PYX_ERR(0, 20, __pyx_L3_error)
}
CYTHON_FALLTHROUGH;
case 3:
if (likely((values[3] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_wordbias_sum_gradients)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("fit_vectors", 1, 13, 13, 3); __PYX_ERR(0, 20, __pyx_L3_error)
}
CYTHON_FALLTHROUGH;
case 4:
if (likely((values[4] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_row)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("fit_vectors", 1, 13, 13, 4); __PYX_ERR(0, 20, __pyx_L3_error)
}
CYTHON_FALLTHROUGH;
case 5:
if (likely((values[5] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_col)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("fit_vectors", 1, 13, 13, 5); __PYX_ERR(0, 20, __pyx_L3_error)
}
CYTHON_FALLTHROUGH;
case 6:
if (likely((values[6] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_counts)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("fit_vectors", 1, 13, 13, 6); __PYX_ERR(0, 20, __pyx_L3_error)
}
CYTHON_FALLTHROUGH;
case 7:
if (likely((values[7] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_shuffle_indices)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("fit_vectors", 1, 13, 13, 7); __PYX_ERR(0, 20, __pyx_L3_error)
}
CYTHON_FALLTHROUGH;
case 8:
if (likely((values[8] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_initial_learning_rate)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("fit_vectors", 1, 13, 13, 8); __PYX_ERR(0, 20, __pyx_L3_error)
}
CYTHON_FALLTHROUGH;
case 9:
if (likely((values[9] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_max_count)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("fit_vectors", 1, 13, 13, 9); __PYX_ERR(0, 20, __pyx_L3_error)
}
CYTHON_FALLTHROUGH;
case 10:
if (likely((values[10] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_alpha)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("fit_vectors", 1, 13, 13, 10); __PYX_ERR(0, 20, __pyx_L3_error)
}
CYTHON_FALLTHROUGH;
case 11:
if (likely((values[11] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_max_loss)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("fit_vectors", 1, 13, 13, 11); __PYX_ERR(0, 20, __pyx_L3_error)
}
CYTHON_FALLTHROUGH;
case 12:
if (likely((values[12] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_no_threads)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("fit_vectors", 1, 13, 13, 12); __PYX_ERR(0, 20, __pyx_L3_error)
}
}
if (unlikely(kw_args > 0)) {
if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "fit_vectors") < 0)) __PYX_ERR(0, 20, __pyx_L3_error)
}
} else if (PyTuple_GET_SIZE(__pyx_args) != 13) {
goto __pyx_L5_argtuple_error;
} else {
values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
values[4] = PyTuple_GET_ITEM(__pyx_args, 4);
values[5] = PyTuple_GET_ITEM(__pyx_args, 5);
values[6] = PyTuple_GET_ITEM(__pyx_args, 6);
values[7] = PyTuple_GET_ITEM(__pyx_args, 7);
values[8] = PyTuple_GET_ITEM(__pyx_args, 8);
values[9] = PyTuple_GET_ITEM(__pyx_args, 9);
values[10] = PyTuple_GET_ITEM(__pyx_args, 10);
values[11] = PyTuple_GET_ITEM(__pyx_args, 11);
values[12] = PyTuple_GET_ITEM(__pyx_args, 12);
}
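    /* Convert the Python arguments to typed memoryview slices. The helper
       suffixes mirror the .pyx signature: _d_dc_double is a 2-D double
       slice C-contiguous in its last dimension (double[:, ::1]), while
       _dc_double and _dc_int are 1-D contiguous slices (double[::1],
       int[::1]). */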
__pyx_v_wordvec = __Pyx_PyObject_to_MemoryviewSlice_d_dc_double(values[0], PyBUF_WRITABLE); if (unlikely(!__pyx_v_wordvec.memview)) __PYX_ERR(0, 20, __pyx_L3_error)
__pyx_v_wordvec_sum_gradients = __Pyx_PyObject_to_MemoryviewSlice_d_dc_double(values[1], PyBUF_WRITABLE); if (unlikely(!__pyx_v_wordvec_sum_gradients.memview)) __PYX_ERR(0, 21, __pyx_L3_error)
__pyx_v_wordbias = __Pyx_PyObject_to_MemoryviewSlice_dc_double(values[2], PyBUF_WRITABLE); if (unlikely(!__pyx_v_wordbias.memview)) __PYX_ERR(0, 22, __pyx_L3_error)
__pyx_v_wordbias_sum_gradients = __Pyx_PyObject_to_MemoryviewSlice_dc_double(values[3], PyBUF_WRITABLE); if (unlikely(!__pyx_v_wordbias_sum_gradients.memview)) __PYX_ERR(0, 23, __pyx_L3_error)
__pyx_v_row = __Pyx_PyObject_to_MemoryviewSlice_dc_int(values[4], PyBUF_WRITABLE); if (unlikely(!__pyx_v_row.memview)) __PYX_ERR(0, 24, __pyx_L3_error)
__pyx_v_col = __Pyx_PyObject_to_MemoryviewSlice_dc_int(values[5], PyBUF_WRITABLE); if (unlikely(!__pyx_v_col.memview)) __PYX_ERR(0, 25, __pyx_L3_error)
__pyx_v_counts = __Pyx_PyObject_to_MemoryviewSlice_dc_double(values[6], PyBUF_WRITABLE); if (unlikely(!__pyx_v_counts.memview)) __PYX_ERR(0, 26, __pyx_L3_error)
__pyx_v_shuffle_indices = __Pyx_PyObject_to_MemoryviewSlice_dc_int(values[7], PyBUF_WRITABLE); if (unlikely(!__pyx_v_shuffle_indices.memview)) __PYX_ERR(0, 27, __pyx_L3_error)
__pyx_v_initial_learning_rate = __pyx_PyFloat_AsDouble(values[8]); if (unlikely((__pyx_v_initial_learning_rate == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 28, __pyx_L3_error)
__pyx_v_max_count = __pyx_PyFloat_AsDouble(values[9]); if (unlikely((__pyx_v_max_count == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 29, __pyx_L3_error)
__pyx_v_alpha = __pyx_PyFloat_AsDouble(values[10]); if (unlikely((__pyx_v_alpha == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 30, __pyx_L3_error)
__pyx_v_max_loss = __pyx_PyFloat_AsDouble(values[11]); if (unlikely((__pyx_v_max_loss == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 31, __pyx_L3_error)
__pyx_v_no_threads = __Pyx_PyInt_As_int(values[12]); if (unlikely((__pyx_v_no_threads == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 32, __pyx_L3_error)
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
__Pyx_RaiseArgtupleInvalid("fit_vectors", 1, 13, 13, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 20, __pyx_L3_error)
__pyx_L3_error:;
__Pyx_AddTraceback("glove.glove_cython.fit_vectors", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
return NULL;
__pyx_L4_argument_unpacking_done:;
__pyx_r = __pyx_pf_5glove_12glove_cython_fit_vectors(__pyx_self, __pyx_v_wordvec, __pyx_v_wordvec_sum_gradients, __pyx_v_wordbias, __pyx_v_wordbias_sum_gradients, __pyx_v_row, __pyx_v_col, __pyx_v_counts, __pyx_v_shuffle_indices, __pyx_v_initial_learning_rate, __pyx_v_max_count, __pyx_v_alpha, __pyx_v_max_loss, __pyx_v_no_threads);
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_5glove_12glove_cython_fit_vectors(CYTHON_UNUSED PyObject *__pyx_self, __Pyx_memviewslice __pyx_v_wordvec, __Pyx_memviewslice __pyx_v_wordvec_sum_gradients, __Pyx_memviewslice __pyx_v_wordbias, __Pyx_memviewslice __pyx_v_wordbias_sum_gradients, __Pyx_memviewslice __pyx_v_row, __Pyx_memviewslice __pyx_v_col, __Pyx_memviewslice __pyx_v_counts, __Pyx_memviewslice __pyx_v_shuffle_indices, double __pyx_v_initial_learning_rate, double __pyx_v_max_count, double __pyx_v_alpha, double __pyx_v_max_loss, CYTHON_UNUSED int __pyx_v_no_threads) {
int __pyx_v_dim;
CYTHON_UNUSED int __pyx_v_no_cooccurrences;
int __pyx_v_word_a;
int __pyx_v_word_b;
double __pyx_v_count;
double __pyx_v_learning_rate;
double __pyx_v_gradient;
double __pyx_v_prediction;
double __pyx_v_entry_weight;
double __pyx_v_loss;
int __pyx_v_i;
int __pyx_v_j;
int __pyx_v_shuffle_index;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
int __pyx_t_3;
Py_ssize_t __pyx_t_4;
Py_ssize_t __pyx_t_5;
Py_ssize_t __pyx_t_6;
Py_ssize_t __pyx_t_7;
int __pyx_t_8;
int __pyx_t_9;
int __pyx_t_10;
Py_ssize_t __pyx_t_11;
Py_ssize_t __pyx_t_12;
Py_ssize_t __pyx_t_13;
Py_ssize_t __pyx_t_14;
Py_ssize_t __pyx_t_15;
Py_ssize_t __pyx_t_16;
int __pyx_t_17;
Py_ssize_t __pyx_t_18;
Py_ssize_t __pyx_t_19;
Py_ssize_t __pyx_t_20;
Py_ssize_t __pyx_t_21;
Py_ssize_t __pyx_t_22;
Py_ssize_t __pyx_t_23;
Py_ssize_t __pyx_t_24;
Py_ssize_t __pyx_t_25;
Py_ssize_t __pyx_t_26;
Py_ssize_t __pyx_t_27;
Py_ssize_t __pyx_t_28;
Py_ssize_t __pyx_t_29;
Py_ssize_t __pyx_t_30;
Py_ssize_t __pyx_t_31;
Py_ssize_t __pyx_t_32;
Py_ssize_t __pyx_t_33;
Py_ssize_t __pyx_t_34;
Py_ssize_t __pyx_t_35;
Py_ssize_t __pyx_t_36;
Py_ssize_t __pyx_t_37;
Py_ssize_t __pyx_t_38;
Py_ssize_t __pyx_t_39;
Py_ssize_t __pyx_t_40;
Py_ssize_t __pyx_t_41;
Py_ssize_t __pyx_t_42;
Py_ssize_t __pyx_t_43;
__Pyx_RefNannySetupContext("fit_vectors", 0);
/* "glove/glove_cython.pyx":43
* # Get number of latent dimensions and
* # number of cooccurrences.
* cdef int dim = wordvec.shape[1] # <<<<<<<<<<<<<<
* cdef int no_cooccurrences = row.shape[0]
*
*/
__pyx_v_dim = (__pyx_v_wordvec.shape[1]);
/* "glove/glove_cython.pyx":44
* # number of cooccurrences.
* cdef int dim = wordvec.shape[1]
* cdef int no_cooccurrences = row.shape[0] # <<<<<<<<<<<<<<
*
* # Hold indices of current words and
*/
__pyx_v_no_cooccurrences = (__pyx_v_row.shape[0]);
/* "glove/glove_cython.pyx":59
* # We iterate over random indices to simulate
* # shuffling the cooccurrence matrix.
* with nogil: # <<<<<<<<<<<<<<
* for j in prange(no_cooccurrences, num_threads=no_threads,
* schedule='dynamic'):
*/
{
#ifdef WITH_THREAD
PyThreadState *_save;
Py_UNBLOCK_THREADS
__Pyx_FastGIL_Remember();
#endif
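      /* The `with nogil:` block releases the GIL here (Py_UNBLOCK_THREADS)
         so the OpenMP team below can run the numeric loop concurrently;
         the GIL is re-acquired when the block exits. */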
/*try:*/ {
/* "glove/glove_cython.pyx":60
* # shuffling the cooccurrence matrix.
* with nogil:
* for j in prange(no_cooccurrences, num_threads=no_threads, # <<<<<<<<<<<<<<
* schedule='dynamic'):
* shuffle_index = shuffle_indices[j]
*/
__pyx_t_1 = __pyx_v_no_cooccurrences;
        if (1 == 0) abort(); /* generated zero-step guard; dead code here since the prange step is the constant 1 */
{
#if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)))))
#undef likely
#undef unlikely
#define likely(x) (x)
#define unlikely(x) (x)
#endif
__pyx_t_3 = (__pyx_t_1 - 0 + 1 - 1/abs(1)) / 1;
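            /* The line above computes the prange iteration count: the
               general ceil((stop - start) / step) formula, which for
               start 0 and step 1 folds to no_cooccurrences. */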
if (__pyx_t_3 > 0)
{
#ifdef _OPENMP
#pragma omp parallel num_threads(__pyx_v_no_threads) private(__pyx_t_10, __pyx_t_11, __pyx_t_12, __pyx_t_13, __pyx_t_14, __pyx_t_15, __pyx_t_16, __pyx_t_17, __pyx_t_18, __pyx_t_19, __pyx_t_20, __pyx_t_21, __pyx_t_22, __pyx_t_23, __pyx_t_24, __pyx_t_25, __pyx_t_26, __pyx_t_27, __pyx_t_28, __pyx_t_29, __pyx_t_30, __pyx_t_31, __pyx_t_32, __pyx_t_33, __pyx_t_34, __pyx_t_35, __pyx_t_36, __pyx_t_37, __pyx_t_38, __pyx_t_39, __pyx_t_4, __pyx_t_40, __pyx_t_41, __pyx_t_42, __pyx_t_43, __pyx_t_5, __pyx_t_6, __pyx_t_7, __pyx_t_8, __pyx_t_9)
#endif /* _OPENMP */
{
#ifdef _OPENMP
#pragma omp for lastprivate(__pyx_v_count) lastprivate(__pyx_v_entry_weight) lastprivate(__pyx_v_gradient) lastprivate(__pyx_v_i) firstprivate(__pyx_v_j) lastprivate(__pyx_v_j) lastprivate(__pyx_v_learning_rate) lastprivate(__pyx_v_loss) lastprivate(__pyx_v_prediction) lastprivate(__pyx_v_shuffle_index) lastprivate(__pyx_v_word_a) lastprivate(__pyx_v_word_b) schedule(dynamic)
#endif /* _OPENMP */
for (__pyx_t_2 = 0; __pyx_t_2 < __pyx_t_3; __pyx_t_2++){
{
__pyx_v_j = (int)(0 + 1 * __pyx_t_2);
/* Initialize private variables to invalid values */
__pyx_v_count = ((double)__PYX_NAN());
__pyx_v_entry_weight = ((double)__PYX_NAN());
__pyx_v_gradient = ((double)__PYX_NAN());
__pyx_v_i = ((int)0xbad0bad0);
__pyx_v_learning_rate = ((double)__PYX_NAN());
__pyx_v_loss = ((double)__PYX_NAN());
__pyx_v_prediction = ((double)__PYX_NAN());
__pyx_v_shuffle_index = ((int)0xbad0bad0);
__pyx_v_word_a = ((int)0xbad0bad0);
__pyx_v_word_b = ((int)0xbad0bad0);
/* "glove/glove_cython.pyx":62
* for j in prange(no_cooccurrences, num_threads=no_threads,
* schedule='dynamic'):
* shuffle_index = shuffle_indices[j] # <<<<<<<<<<<<<<
* word_a = row[shuffle_index]
* word_b = col[shuffle_index]
*/
__pyx_t_4 = __pyx_v_j;
__pyx_v_shuffle_index = (*((int *) ( /* dim=0 */ ((char *) (((int *) __pyx_v_shuffle_indices.data) + __pyx_t_4)) )));
/* "glove/glove_cython.pyx":63
* schedule='dynamic'):
* shuffle_index = shuffle_indices[j]
* word_a = row[shuffle_index] # <<<<<<<<<<<<<<
* word_b = col[shuffle_index]
* count = counts[shuffle_index]
*/
__pyx_t_5 = __pyx_v_shuffle_index;
__pyx_v_word_a = (*((int *) ( /* dim=0 */ ((char *) (((int *) __pyx_v_row.data) + __pyx_t_5)) )));
/* "glove/glove_cython.pyx":64
* shuffle_index = shuffle_indices[j]
* word_a = row[shuffle_index]
* word_b = col[shuffle_index] # <<<<<<<<<<<<<<
* count = counts[shuffle_index]
*
*/
__pyx_t_6 = __pyx_v_shuffle_index;
__pyx_v_word_b = (*((int *) ( /* dim=0 */ ((char *) (((int *) __pyx_v_col.data) + __pyx_t_6)) )));
/* "glove/glove_cython.pyx":65
* word_a = row[shuffle_index]
* word_b = col[shuffle_index]
* count = counts[shuffle_index] # <<<<<<<<<<<<<<
*
* # Get prediction
*/
__pyx_t_7 = __pyx_v_shuffle_index;
__pyx_v_count = (*((double *) ( /* dim=0 */ ((char *) (((double *) __pyx_v_counts.data) + __pyx_t_7)) )));
/* "glove/glove_cython.pyx":68
*
* # Get prediction
* prediction = 0.0 # <<<<<<<<<<<<<<
*
* for i in range(dim):
*/
__pyx_v_prediction = 0.0;
/* "glove/glove_cython.pyx":70
* prediction = 0.0
*
* for i in range(dim): # <<<<<<<<<<<<<<
* prediction = prediction + wordvec[word_a, i] * wordvec[word_b, i]
*
*/
__pyx_t_8 = __pyx_v_dim;
__pyx_t_9 = __pyx_t_8;
for (__pyx_t_10 = 0; __pyx_t_10 < __pyx_t_9; __pyx_t_10+=1) {
__pyx_v_i = __pyx_t_10;
/* "glove/glove_cython.pyx":71
*
* for i in range(dim):
* prediction = prediction + wordvec[word_a, i] * wordvec[word_b, i] # <<<<<<<<<<<<<<
*
* prediction = prediction + wordbias[word_a] + wordbias[word_b]
*/
__pyx_t_11 = __pyx_v_word_a;
__pyx_t_12 = __pyx_v_i;
__pyx_t_13 = __pyx_v_word_b;
__pyx_t_14 = __pyx_v_i;
__pyx_v_prediction = (__pyx_v_prediction + ((*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_wordvec.data + __pyx_t_11 * __pyx_v_wordvec.strides[0]) )) + __pyx_t_12)) ))) * (*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_wordvec.data + __pyx_t_13 * __pyx_v_wordvec.strides[0]) )) + __pyx_t_14)) )))));
}
/* "glove/glove_cython.pyx":73
* prediction = prediction + wordvec[word_a, i] * wordvec[word_b, i]
*
* prediction = prediction + wordbias[word_a] + wordbias[word_b] # <<<<<<<<<<<<<<
*
* # Compute loss and the example weight.
*/
__pyx_t_15 = __pyx_v_word_a;
__pyx_t_16 = __pyx_v_word_b;
__pyx_v_prediction = ((__pyx_v_prediction + (*((double *) ( /* dim=0 */ ((char *) (((double *) __pyx_v_wordbias.data) + __pyx_t_15)) )))) + (*((double *) ( /* dim=0 */ ((char *) (((double *) __pyx_v_wordbias.data) + __pyx_t_16)) ))));
/* "glove/glove_cython.pyx":76
*
* # Compute loss and the example weight.
* entry_weight = double_min(1.0, (count / max_count)) ** alpha # <<<<<<<<<<<<<<
* loss = entry_weight * (prediction - c_log(count))
*
*/
__pyx_v_entry_weight = pow(__pyx_f_5glove_12glove_cython_double_min(1.0, (__pyx_v_count / __pyx_v_max_count)), __pyx_v_alpha);
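                        /* GloVe weighting function:
                           f(X_ab) = min(1, X_ab / max_count)^alpha.
                           It down-weights rare cooccurrences and caps the
                           weight of frequent ones at 1. */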
/* "glove/glove_cython.pyx":77
* # Compute loss and the example weight.
* entry_weight = double_min(1.0, (count / max_count)) ** alpha
* loss = entry_weight * (prediction - c_log(count)) # <<<<<<<<<<<<<<
*
* # Clip the loss for numerical stability.
*/
__pyx_v_loss = (__pyx_v_entry_weight * (__pyx_v_prediction - log(__pyx_v_count)));
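                        /* `loss` is f(X_ab) * (prediction - log X_ab), the
                           weighted residual of the GloVe objective; it
                           scales every per-coordinate gradient below. The
                           conventional factor of 2 from differentiating the
                           squared loss is absorbed into the learning rate. */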
/* "glove/glove_cython.pyx":80
*
* # Clip the loss for numerical stability.
* if loss < -max_loss: # <<<<<<<<<<<<<<
* loss = -max_loss
* elif loss > max_loss:
*/
__pyx_t_17 = ((__pyx_v_loss < (-__pyx_v_max_loss)) != 0);
if (__pyx_t_17) {
/* "glove/glove_cython.pyx":81
* # Clip the loss for numerical stability.
* if loss < -max_loss:
* loss = -max_loss # <<<<<<<<<<<<<<
* elif loss > max_loss:
* loss = max_loss
*/
__pyx_v_loss = (-__pyx_v_max_loss);
/* "glove/glove_cython.pyx":80
*
* # Clip the loss for numerical stability.
* if loss < -max_loss: # <<<<<<<<<<<<<<
* loss = -max_loss
* elif loss > max_loss:
*/
goto __pyx_L12;
}
/* "glove/glove_cython.pyx":82
* if loss < -max_loss:
* loss = -max_loss
* elif loss > max_loss: # <<<<<<<<<<<<<<
* loss = max_loss
*
*/
__pyx_t_17 = ((__pyx_v_loss > __pyx_v_max_loss) != 0);
if (__pyx_t_17) {
/* "glove/glove_cython.pyx":83
* loss = -max_loss
* elif loss > max_loss:
* loss = max_loss # <<<<<<<<<<<<<<
*
* # Update step: apply gradients and reproject
*/
__pyx_v_loss = __pyx_v_max_loss;
/* "glove/glove_cython.pyx":82
* if loss < -max_loss:
* loss = -max_loss
* elif loss > max_loss: # <<<<<<<<<<<<<<
* loss = max_loss
*
*/
}
__pyx_L12:;
/* "glove/glove_cython.pyx":87
* # Update step: apply gradients and reproject
* # onto the unit sphere.
* for i in range(dim): # <<<<<<<<<<<<<<
*
* learning_rate = initial_learning_rate / sqrt(wordvec_sum_gradients[word_a, i])
*/
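                        /* Note: despite the quoted .pyx comment, the loop
                           below applies only the AdaGrad gradient steps; no
                           explicit reprojection onto the unit sphere appears
                           in this translation unit. */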
__pyx_t_8 = __pyx_v_dim;
__pyx_t_9 = __pyx_t_8;
for (__pyx_t_10 = 0; __pyx_t_10 < __pyx_t_9; __pyx_t_10+=1) {
__pyx_v_i = __pyx_t_10;
/* "glove/glove_cython.pyx":89
* for i in range(dim):
*
* learning_rate = initial_learning_rate / sqrt(wordvec_sum_gradients[word_a, i]) # <<<<<<<<<<<<<<
* gradient = loss * wordvec[word_b, i]
* wordvec[word_a, i] = (wordvec[word_a, i] - learning_rate
*/
__pyx_t_18 = __pyx_v_word_a;
__pyx_t_19 = __pyx_v_i;
__pyx_v_learning_rate = (__pyx_v_initial_learning_rate / sqrt((*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_wordvec_sum_gradients.data + __pyx_t_18 * __pyx_v_wordvec_sum_gradients.strides[0]) )) + __pyx_t_19)) )))));
/* "glove/glove_cython.pyx":90
*
* learning_rate = initial_learning_rate / sqrt(wordvec_sum_gradients[word_a, i])
* gradient = loss * wordvec[word_b, i] # <<<<<<<<<<<<<<
* wordvec[word_a, i] = (wordvec[word_a, i] - learning_rate
* * gradient)
*/
__pyx_t_20 = __pyx_v_word_b;
__pyx_t_21 = __pyx_v_i;
__pyx_v_gradient = (__pyx_v_loss * (*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_wordvec.data + __pyx_t_20 * __pyx_v_wordvec.strides[0]) )) + __pyx_t_21)) ))));
/* "glove/glove_cython.pyx":91
* learning_rate = initial_learning_rate / sqrt(wordvec_sum_gradients[word_a, i])
* gradient = loss * wordvec[word_b, i]
* wordvec[word_a, i] = (wordvec[word_a, i] - learning_rate # <<<<<<<<<<<<<<
* * gradient)
* wordvec_sum_gradients[word_a, i] += gradient ** 2
*/
__pyx_t_22 = __pyx_v_word_a;
__pyx_t_23 = __pyx_v_i;
/* "glove/glove_cython.pyx":92
* gradient = loss * wordvec[word_b, i]
* wordvec[word_a, i] = (wordvec[word_a, i] - learning_rate
* * gradient) # <<<<<<<<<<<<<<
* wordvec_sum_gradients[word_a, i] += gradient ** 2
*
*/
__pyx_t_24 = __pyx_v_word_a;
__pyx_t_25 = __pyx_v_i;
*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_wordvec.data + __pyx_t_24 * __pyx_v_wordvec.strides[0]) )) + __pyx_t_25)) )) = ((*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_wordvec.data + __pyx_t_22 * __pyx_v_wordvec.strides[0]) )) + __pyx_t_23)) ))) - (__pyx_v_learning_rate * __pyx_v_gradient));
/* "glove/glove_cython.pyx":93
* wordvec[word_a, i] = (wordvec[word_a, i] - learning_rate
* * gradient)
* wordvec_sum_gradients[word_a, i] += gradient ** 2 # <<<<<<<<<<<<<<
*
* learning_rate = initial_learning_rate / sqrt(wordvec_sum_gradients[word_b, i])
*/
__pyx_t_26 = __pyx_v_word_a;
__pyx_t_27 = __pyx_v_i;
*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_wordvec_sum_gradients.data + __pyx_t_26 * __pyx_v_wordvec_sum_gradients.strides[0]) )) + __pyx_t_27)) )) += pow(__pyx_v_gradient, 2.0);
/* "glove/glove_cython.pyx":95
* wordvec_sum_gradients[word_a, i] += gradient ** 2
*
* learning_rate = initial_learning_rate / sqrt(wordvec_sum_gradients[word_b, i]) # <<<<<<<<<<<<<<
* gradient = loss * wordvec[word_a, i]
* wordvec[word_b, i] = (wordvec[word_b, i] - learning_rate
*/
__pyx_t_28 = __pyx_v_word_b;
__pyx_t_29 = __pyx_v_i;
__pyx_v_learning_rate = (__pyx_v_initial_learning_rate / sqrt((*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_wordvec_sum_gradients.data + __pyx_t_28 * __pyx_v_wordvec_sum_gradients.strides[0]) )) + __pyx_t_29)) )))));
/* "glove/glove_cython.pyx":96
*
* learning_rate = initial_learning_rate / sqrt(wordvec_sum_gradients[word_b, i])
* gradient = loss * wordvec[word_a, i] # <<<<<<<<<<<<<<
* wordvec[word_b, i] = (wordvec[word_b, i] - learning_rate
* * gradient)
*/
__pyx_t_30 = __pyx_v_word_a;
__pyx_t_31 = __pyx_v_i;
__pyx_v_gradient = (__pyx_v_loss * (*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_wordvec.data + __pyx_t_30 * __pyx_v_wordvec.strides[0]) )) + __pyx_t_31)) ))));
/* "glove/glove_cython.pyx":97
* learning_rate = initial_learning_rate / sqrt(wordvec_sum_gradients[word_b, i])
* gradient = loss * wordvec[word_a, i]
* wordvec[word_b, i] = (wordvec[word_b, i] - learning_rate # <<<<<<<<<<<<<<
* * gradient)
* wordvec_sum_gradients[word_b, i] += gradient ** 2
*/
__pyx_t_32 = __pyx_v_word_b;
__pyx_t_33 = __pyx_v_i;
/* "glove/glove_cython.pyx":98
* gradient = loss * wordvec[word_a, i]
* wordvec[word_b, i] = (wordvec[word_b, i] - learning_rate
* * gradient) # <<<<<<<<<<<<<<
* wordvec_sum_gradients[word_b, i] += gradient ** 2
*
*/
__pyx_t_34 = __pyx_v_word_b;
__pyx_t_35 = __pyx_v_i;
*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_wordvec.data + __pyx_t_34 * __pyx_v_wordvec.strides[0]) )) + __pyx_t_35)) )) = ((*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_wordvec.data + __pyx_t_32 * __pyx_v_wordvec.strides[0]) )) + __pyx_t_33)) ))) - (__pyx_v_learning_rate * __pyx_v_gradient));
/* "glove/glove_cython.pyx":99
* wordvec[word_b, i] = (wordvec[word_b, i] - learning_rate
* * gradient)
* wordvec_sum_gradients[word_b, i] += gradient ** 2 # <<<<<<<<<<<<<<
*
* # Update word biases.
*/
__pyx_t_36 = __pyx_v_word_b;
__pyx_t_37 = __pyx_v_i;
*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_wordvec_sum_gradients.data + __pyx_t_36 * __pyx_v_wordvec_sum_gradients.strides[0]) )) + __pyx_t_37)) )) += pow(__pyx_v_gradient, 2.0);
}
/* "glove/glove_cython.pyx":102
*
* # Update word biases.
* learning_rate = initial_learning_rate / sqrt(wordbias_sum_gradients[word_a]) # <<<<<<<<<<<<<<
* wordbias[word_a] -= learning_rate * loss
* wordbias_sum_gradients[word_a] += loss ** 2
*/
__pyx_t_38 = __pyx_v_word_a;
__pyx_v_learning_rate = (__pyx_v_initial_learning_rate / sqrt((*((double *) ( /* dim=0 */ ((char *) (((double *) __pyx_v_wordbias_sum_gradients.data) + __pyx_t_38)) )))));
/* "glove/glove_cython.pyx":103
* # Update word biases.
* learning_rate = initial_learning_rate / sqrt(wordbias_sum_gradients[word_a])
* wordbias[word_a] -= learning_rate * loss # <<<<<<<<<<<<<<
* wordbias_sum_gradients[word_a] += loss ** 2
*
*/
__pyx_t_39 = __pyx_v_word_a;
*((double *) ( /* dim=0 */ ((char *) (((double *) __pyx_v_wordbias.data) + __pyx_t_39)) )) -= (__pyx_v_learning_rate * __pyx_v_loss);
/* "glove/glove_cython.pyx":104
* learning_rate = initial_learning_rate / sqrt(wordbias_sum_gradients[word_a])
* wordbias[word_a] -= learning_rate * loss
* wordbias_sum_gradients[word_a] += loss ** 2 # <<<<<<<<<<<<<<
*
* learning_rate = initial_learning_rate / sqrt(wordbias_sum_gradients[word_b])
*/
__pyx_t_40 = __pyx_v_word_a;
*((double *) ( /* dim=0 */ ((char *) (((double *) __pyx_v_wordbias_sum_gradients.data) + __pyx_t_40)) )) += pow(__pyx_v_loss, 2.0);
/* "glove/glove_cython.pyx":106
* wordbias_sum_gradients[word_a] += loss ** 2
*
* learning_rate = initial_learning_rate / sqrt(wordbias_sum_gradients[word_b]) # <<<<<<<<<<<<<<
* wordbias[word_b] -= learning_rate * loss
* wordbias_sum_gradients[word_b] += loss ** 2
*/
__pyx_t_41 = __pyx_v_word_b;
__pyx_v_learning_rate = (__pyx_v_initial_learning_rate / sqrt((*((double *) ( /* dim=0 */ ((char *) (((double *) __pyx_v_wordbias_sum_gradients.data) + __pyx_t_41)) )))));
/* "glove/glove_cython.pyx":107
*
* learning_rate = initial_learning_rate / sqrt(wordbias_sum_gradients[word_b])
* wordbias[word_b] -= learning_rate * loss # <<<<<<<<<<<<<<
* wordbias_sum_gradients[word_b] += loss ** 2
*/
__pyx_t_42 = __pyx_v_word_b;
*((double *) ( /* dim=0 */ ((char *) (((double *) __pyx_v_wordbias.data) + __pyx_t_42)) )) -= (__pyx_v_learning_rate * __pyx_v_loss);
/* "glove/glove_cython.pyx":108
* learning_rate = initial_learning_rate / sqrt(wordbias_sum_gradients[word_b])
* wordbias[word_b] -= learning_rate * loss
* wordbias_sum_gradients[word_b] += loss ** 2 # <<<<<<<<<<<<<<
*/
__pyx_t_43 = __pyx_v_word_b;
*((double *) ( /* dim=0 */ ((char *) (((double *) __pyx_v_wordbias_sum_gradients.data) + __pyx_t_43)) )) += pow(__pyx_v_loss, 2.0);
}
}
}
}
}
#if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)))))
#undef likely
#undef unlikely
#define likely(x) __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
#endif
}
/* "glove/glove_cython.pyx":59
* # We iterate over random indices to simulate
* # shuffling the cooccurrence matrix.
* with nogil: # <<<<<<<<<<<<<<
* for j in prange(no_cooccurrences, num_threads=no_threads,
* schedule='dynamic'):
*/
/*finally:*/ {
/*normal exit:*/{
#ifdef WITH_THREAD
__Pyx_FastGIL_Forget();
Py_BLOCK_THREADS
#endif
goto __pyx_L5;
}
__pyx_L5:;
}
}
/* "glove/glove_cython.pyx":20
*
*
* def fit_vectors(double[:, ::1] wordvec, # <<<<<<<<<<<<<<
* double[:, ::1] wordvec_sum_gradients,
* double[::1] wordbias,
*/
/* function exit code */
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
__PYX_XDEC_MEMVIEW(&__pyx_v_wordvec, 1);
__PYX_XDEC_MEMVIEW(&__pyx_v_wordvec_sum_gradients, 1);
__PYX_XDEC_MEMVIEW(&__pyx_v_wordbias, 1);
__PYX_XDEC_MEMVIEW(&__pyx_v_wordbias_sum_gradients, 1);
__PYX_XDEC_MEMVIEW(&__pyx_v_row, 1);
__PYX_XDEC_MEMVIEW(&__pyx_v_col, 1);
__PYX_XDEC_MEMVIEW(&__pyx_v_counts, 1);
__PYX_XDEC_MEMVIEW(&__pyx_v_shuffle_indices, 1);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
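/* ---------------------------------------------------------------------
 * Illustrative sketch, documentation only (hence the #if 0): the update
 * the generated loop above performs for a single cooccurrence entry,
 * rewritten as plain C. The function and parameter names are
 * hypothetical, <math.h> is assumed, and the factor of 2 from
 * differentiating the squared loss is taken to be folded into the
 * learning rate, as in the generated code. Note that, matching the
 * generated order, the gradient for word_b's row is computed from
 * word_a's already-updated row. */
#if 0
static void glove_update_entry(double *wa, double *wb,   /* rows of wordvec */
                               double *ga, double *gb,   /* rows of wordvec_sum_gradients */
                               double *ba, double *bb,   /* &wordbias[word_a], &wordbias[word_b] */
                               double *sa, double *sb,   /* &wordbias_sum_gradients[...] */
                               int dim, double count, double lr0,
                               double max_count, double alpha, double max_loss)
{
    /* Prediction: dot product of the two word vectors plus both biases. */
    double prediction = *ba + *bb;
    int i;
    for (i = 0; i < dim; i++)
        prediction += wa[i] * wb[i];

    /* Weighted residual, clipped for numerical stability. */
    double loss = pow(fmin(1.0, count / max_count), alpha)
                  * (prediction - log(count));
    if (loss < -max_loss) loss = -max_loss;
    else if (loss > max_loss) loss = max_loss;

    /* AdaGrad updates: divide by the root of the accumulated squared
     * gradients, then accumulate the new squared gradient. */
    for (i = 0; i < dim; i++) {
        double g = loss * wb[i];              /* gradient w.r.t. wa[i] */
        wa[i] -= (lr0 / sqrt(ga[i])) * g;
        ga[i] += g * g;
        g = loss * wa[i];                     /* gradient w.r.t. wb[i] */
        wb[i] -= (lr0 / sqrt(gb[i])) * g;
        gb[i] += g * g;
    }
    *ba -= (lr0 / sqrt(*sa)) * loss;  *sa += loss * loss;
    *bb -= (lr0 / sqrt(*sb)) * loss;  *sb += loss * loss;
}
#endif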
/* "View.MemoryView":122
* cdef bint dtype_is_object
*
* def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, # <<<<<<<<<<<<<<
* mode="c", bint allocate_buffer=True):
*
*/
/* Python wrapper */
static int __pyx_array___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_array___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
PyObject *__pyx_v_shape = 0;
Py_ssize_t __pyx_v_itemsize;
PyObject *__pyx_v_format = 0;
PyObject *__pyx_v_mode = 0;
int __pyx_v_allocate_buffer;
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__cinit__ (wrapper)", 0);
{
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_shape,&__pyx_n_s_itemsize,&__pyx_n_s_format,&__pyx_n_s_mode,&__pyx_n_s_allocate_buffer,0};
PyObject* values[5] = {0,0,0,0,0};
values[3] = ((PyObject *)__pyx_n_s_c);
if (unlikely(__pyx_kwds)) {
Py_ssize_t kw_args;
const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
switch (pos_args) {
case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4);
CYTHON_FALLTHROUGH;
case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
CYTHON_FALLTHROUGH;
case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
CYTHON_FALLTHROUGH;
case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
CYTHON_FALLTHROUGH;
case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
CYTHON_FALLTHROUGH;
case 0: break;
default: goto __pyx_L5_argtuple_error;
}
kw_args = PyDict_Size(__pyx_kwds);
switch (pos_args) {
case 0:
if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_shape)) != 0)) kw_args--;
else goto __pyx_L5_argtuple_error;
CYTHON_FALLTHROUGH;
case 1:
if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_itemsize)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("__cinit__", 0, 3, 5, 1); __PYX_ERR(1, 122, __pyx_L3_error)
}
CYTHON_FALLTHROUGH;
case 2:
if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_format)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("__cinit__", 0, 3, 5, 2); __PYX_ERR(1, 122, __pyx_L3_error)
}
CYTHON_FALLTHROUGH;
case 3:
if (kw_args > 0) {
PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_mode);
if (value) { values[3] = value; kw_args--; }
}
CYTHON_FALLTHROUGH;
case 4:
if (kw_args > 0) {
PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_allocate_buffer);
if (value) { values[4] = value; kw_args--; }
}
}
if (unlikely(kw_args > 0)) {
if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__cinit__") < 0)) __PYX_ERR(1, 122, __pyx_L3_error)
}
} else {
switch (PyTuple_GET_SIZE(__pyx_args)) {
case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4);
CYTHON_FALLTHROUGH;
case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
CYTHON_FALLTHROUGH;
case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
break;
default: goto __pyx_L5_argtuple_error;
}
}
__pyx_v_shape = ((PyObject*)values[0]);
__pyx_v_itemsize = __Pyx_PyIndex_AsSsize_t(values[1]); if (unlikely((__pyx_v_itemsize == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 122, __pyx_L3_error)
__pyx_v_format = values[2];
__pyx_v_mode = values[3];
if (values[4]) {
__pyx_v_allocate_buffer = __Pyx_PyObject_IsTrue(values[4]); if (unlikely((__pyx_v_allocate_buffer == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 123, __pyx_L3_error)
} else {
/* "View.MemoryView":123
*
* def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None,
* mode="c", bint allocate_buffer=True): # <<<<<<<<<<<<<<
*
* cdef int idx
*/
__pyx_v_allocate_buffer = ((int)1);
}
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
__Pyx_RaiseArgtupleInvalid("__cinit__", 0, 3, 5, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 122, __pyx_L3_error)
__pyx_L3_error:;
__Pyx_AddTraceback("View.MemoryView.array.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
return -1;
__pyx_L4_argument_unpacking_done:;
if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_shape), (&PyTuple_Type), 1, "shape", 1))) __PYX_ERR(1, 122, __pyx_L1_error)
if (unlikely(((PyObject *)__pyx_v_format) == Py_None)) {
PyErr_Format(PyExc_TypeError, "Argument '%.200s' must not be None", "format"); __PYX_ERR(1, 122, __pyx_L1_error)
}
__pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array___cinit__(((struct __pyx_array_obj *)__pyx_v_self), __pyx_v_shape, __pyx_v_itemsize, __pyx_v_format, __pyx_v_mode, __pyx_v_allocate_buffer);
/* "View.MemoryView":122
* cdef bint dtype_is_object
*
* def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, # <<<<<<<<<<<<<<
* mode="c", bint allocate_buffer=True):
*
*/
/* function exit code */
goto __pyx_L0;
__pyx_L1_error:;
__pyx_r = -1;
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array___cinit__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_shape, Py_ssize_t __pyx_v_itemsize, PyObject *__pyx_v_format, PyObject *__pyx_v_mode, int __pyx_v_allocate_buffer) {
int __pyx_v_idx;
Py_ssize_t __pyx_v_i;
Py_ssize_t __pyx_v_dim;
PyObject **__pyx_v_p;
char __pyx_v_order;
int __pyx_r;
__Pyx_RefNannyDeclarations
Py_ssize_t __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
int __pyx_t_4;
PyObject *__pyx_t_5 = NULL;
PyObject *__pyx_t_6 = NULL;
char *__pyx_t_7;
int __pyx_t_8;
Py_ssize_t __pyx_t_9;
PyObject *__pyx_t_10 = NULL;
Py_ssize_t __pyx_t_11;
__Pyx_RefNannySetupContext("__cinit__", 0);
__Pyx_INCREF(__pyx_v_format);
/* "View.MemoryView":129
* cdef PyObject **p
*
* self.ndim = <int> len(shape) # <<<<<<<<<<<<<<
* self.itemsize = itemsize
*
*/
if (unlikely(__pyx_v_shape == Py_None)) {
PyErr_SetString(PyExc_TypeError, "object of type 'NoneType' has no len()");
__PYX_ERR(1, 129, __pyx_L1_error)
}
__pyx_t_1 = PyTuple_GET_SIZE(__pyx_v_shape); if (unlikely(__pyx_t_1 == ((Py_ssize_t)-1))) __PYX_ERR(1, 129, __pyx_L1_error)
__pyx_v_self->ndim = ((int)__pyx_t_1);
/* "View.MemoryView":130
*
* self.ndim = <int> len(shape)
* self.itemsize = itemsize # <<<<<<<<<<<<<<
*
* if not self.ndim:
*/
__pyx_v_self->itemsize = __pyx_v_itemsize;
/* "View.MemoryView":132
* self.itemsize = itemsize
*
* if not self.ndim: # <<<<<<<<<<<<<<
* raise ValueError("Empty shape tuple for cython.array")
*
*/
__pyx_t_2 = ((!(__pyx_v_self->ndim != 0)) != 0);
if (unlikely(__pyx_t_2)) {
/* "View.MemoryView":133
*
* if not self.ndim:
* raise ValueError("Empty shape tuple for cython.array") # <<<<<<<<<<<<<<
*
* if itemsize <= 0:
*/
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple_, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 133, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__PYX_ERR(1, 133, __pyx_L1_error)
/* "View.MemoryView":132
* self.itemsize = itemsize
*
* if not self.ndim: # <<<<<<<<<<<<<<
* raise ValueError("Empty shape tuple for cython.array")
*
*/
}
/* "View.MemoryView":135
* raise ValueError("Empty shape tuple for cython.array")
*
* if itemsize <= 0: # <<<<<<<<<<<<<<
* raise ValueError("itemsize <= 0 for cython.array")
*
*/
__pyx_t_2 = ((__pyx_v_itemsize <= 0) != 0);
if (unlikely(__pyx_t_2)) {
/* "View.MemoryView":136
*
* if itemsize <= 0:
* raise ValueError("itemsize <= 0 for cython.array") # <<<<<<<<<<<<<<
*
* if not isinstance(format, bytes):
*/
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__2, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 136, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__PYX_ERR(1, 136, __pyx_L1_error)
/* "View.MemoryView":135
* raise ValueError("Empty shape tuple for cython.array")
*
* if itemsize <= 0: # <<<<<<<<<<<<<<
* raise ValueError("itemsize <= 0 for cython.array")
*
*/
}
/* "View.MemoryView":138
* raise ValueError("itemsize <= 0 for cython.array")
*
* if not isinstance(format, bytes): # <<<<<<<<<<<<<<
* format = format.encode('ASCII')
* self._format = format # keep a reference to the byte string
*/
__pyx_t_2 = PyBytes_Check(__pyx_v_format);
__pyx_t_4 = ((!(__pyx_t_2 != 0)) != 0);
if (__pyx_t_4) {
/* "View.MemoryView":139
*
* if not isinstance(format, bytes):
* format = format.encode('ASCII') # <<<<<<<<<<<<<<
* self._format = format # keep a reference to the byte string
* self.format = self._format
*/
__pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_format, __pyx_n_s_encode); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 139, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_6 = NULL;
if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_5))) {
__pyx_t_6 = PyMethod_GET_SELF(__pyx_t_5);
if (likely(__pyx_t_6)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5);
__Pyx_INCREF(__pyx_t_6);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_5, function);
}
}
__pyx_t_3 = (__pyx_t_6) ? __Pyx_PyObject_Call2Args(__pyx_t_5, __pyx_t_6, __pyx_n_s_ASCII) : __Pyx_PyObject_CallOneArg(__pyx_t_5, __pyx_n_s_ASCII);
__Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0;
if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 139, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__Pyx_DECREF_SET(__pyx_v_format, __pyx_t_3);
__pyx_t_3 = 0;
/* "View.MemoryView":138
* raise ValueError("itemsize <= 0 for cython.array")
*
* if not isinstance(format, bytes): # <<<<<<<<<<<<<<
* format = format.encode('ASCII')
* self._format = format # keep a reference to the byte string
*/
}
/* "View.MemoryView":140
* if not isinstance(format, bytes):
* format = format.encode('ASCII')
* self._format = format # keep a reference to the byte string # <<<<<<<<<<<<<<
* self.format = self._format
*
*/
if (!(likely(PyBytes_CheckExact(__pyx_v_format))||((__pyx_v_format) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "bytes", Py_TYPE(__pyx_v_format)->tp_name), 0))) __PYX_ERR(1, 140, __pyx_L1_error)
__pyx_t_3 = __pyx_v_format;
__Pyx_INCREF(__pyx_t_3);
__Pyx_GIVEREF(__pyx_t_3);
__Pyx_GOTREF(__pyx_v_self->_format);
__Pyx_DECREF(__pyx_v_self->_format);
__pyx_v_self->_format = ((PyObject*)__pyx_t_3);
__pyx_t_3 = 0;
/* "View.MemoryView":141
* format = format.encode('ASCII')
* self._format = format # keep a reference to the byte string
* self.format = self._format # <<<<<<<<<<<<<<
*
*
*/
if (unlikely(__pyx_v_self->_format == Py_None)) {
PyErr_SetString(PyExc_TypeError, "expected bytes, NoneType found");
__PYX_ERR(1, 141, __pyx_L1_error)
}
__pyx_t_7 = __Pyx_PyBytes_AsWritableString(__pyx_v_self->_format); if (unlikely((!__pyx_t_7) && PyErr_Occurred())) __PYX_ERR(1, 141, __pyx_L1_error)
__pyx_v_self->format = __pyx_t_7;
/* "View.MemoryView":144
*
*
* self._shape = <Py_ssize_t *> PyObject_Malloc(sizeof(Py_ssize_t)*self.ndim*2) # <<<<<<<<<<<<<<
* self._strides = self._shape + self.ndim
*
*/
__pyx_v_self->_shape = ((Py_ssize_t *)PyObject_Malloc((((sizeof(Py_ssize_t)) * __pyx_v_self->ndim) * 2)));
/* "View.MemoryView":145
*
* self._shape = <Py_ssize_t *> PyObject_Malloc(sizeof(Py_ssize_t)*self.ndim*2)
* self._strides = self._shape + self.ndim # <<<<<<<<<<<<<<
*
* if not self._shape:
*/
__pyx_v_self->_strides = (__pyx_v_self->_shape + __pyx_v_self->ndim);
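  /* A single allocation of 2*ndim Py_ssize_t values backs both arrays:
     _shape occupies the first ndim slots, _strides the next ndim. */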
/* "View.MemoryView":147
* self._strides = self._shape + self.ndim
*
* if not self._shape: # <<<<<<<<<<<<<<
* raise MemoryError("unable to allocate shape and strides.")
*
*/
__pyx_t_4 = ((!(__pyx_v_self->_shape != 0)) != 0);
if (unlikely(__pyx_t_4)) {
/* "View.MemoryView":148
*
* if not self._shape:
* raise MemoryError("unable to allocate shape and strides.") # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_MemoryError, __pyx_tuple__3, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 148, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__PYX_ERR(1, 148, __pyx_L1_error)
/* "View.MemoryView":147
* self._strides = self._shape + self.ndim
*
* if not self._shape: # <<<<<<<<<<<<<<
* raise MemoryError("unable to allocate shape and strides.")
*
*/
}
/* "View.MemoryView":151
*
*
* for idx, dim in enumerate(shape): # <<<<<<<<<<<<<<
* if dim <= 0:
* raise ValueError("Invalid shape in axis %d: %d." % (idx, dim))
*/
__pyx_t_8 = 0;
__pyx_t_3 = __pyx_v_shape; __Pyx_INCREF(__pyx_t_3); __pyx_t_1 = 0;
for (;;) {
if (__pyx_t_1 >= PyTuple_GET_SIZE(__pyx_t_3)) break;
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
    __pyx_t_5 = PyTuple_GET_ITEM(__pyx_t_3, __pyx_t_1); __Pyx_INCREF(__pyx_t_5); __pyx_t_1++; if (unlikely(0 < 0)) __PYX_ERR(1, 151, __pyx_L1_error) /* constant-folded error check; never taken */
#else
__pyx_t_5 = PySequence_ITEM(__pyx_t_3, __pyx_t_1); __pyx_t_1++; if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 151, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
#endif
__pyx_t_9 = __Pyx_PyIndex_AsSsize_t(__pyx_t_5); if (unlikely((__pyx_t_9 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 151, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__pyx_v_dim = __pyx_t_9;
__pyx_v_idx = __pyx_t_8;
__pyx_t_8 = (__pyx_t_8 + 1);
/* "View.MemoryView":152
*
* for idx, dim in enumerate(shape):
* if dim <= 0: # <<<<<<<<<<<<<<
* raise ValueError("Invalid shape in axis %d: %d." % (idx, dim))
* self._shape[idx] = dim
*/
__pyx_t_4 = ((__pyx_v_dim <= 0) != 0);
if (unlikely(__pyx_t_4)) {
/* "View.MemoryView":153
* for idx, dim in enumerate(shape):
* if dim <= 0:
* raise ValueError("Invalid shape in axis %d: %d." % (idx, dim)) # <<<<<<<<<<<<<<
* self._shape[idx] = dim
*
*/
__pyx_t_5 = __Pyx_PyInt_From_int(__pyx_v_idx); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 153, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_6 = PyInt_FromSsize_t(__pyx_v_dim); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 153, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__pyx_t_10 = PyTuple_New(2); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 153, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_10);
__Pyx_GIVEREF(__pyx_t_5);
PyTuple_SET_ITEM(__pyx_t_10, 0, __pyx_t_5);
__Pyx_GIVEREF(__pyx_t_6);
PyTuple_SET_ITEM(__pyx_t_10, 1, __pyx_t_6);
__pyx_t_5 = 0;
__pyx_t_6 = 0;
__pyx_t_6 = __Pyx_PyString_Format(__pyx_kp_s_Invalid_shape_in_axis_d_d, __pyx_t_10); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 153, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
__pyx_t_10 = __Pyx_PyObject_CallOneArg(__pyx_builtin_ValueError, __pyx_t_6); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 153, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_10);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__Pyx_Raise(__pyx_t_10, 0, 0, 0);
__Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
__PYX_ERR(1, 153, __pyx_L1_error)
/* "View.MemoryView":152
*
* for idx, dim in enumerate(shape):
* if dim <= 0: # <<<<<<<<<<<<<<
* raise ValueError("Invalid shape in axis %d: %d." % (idx, dim))
* self._shape[idx] = dim
*/
}
/* "View.MemoryView":154
* if dim <= 0:
* raise ValueError("Invalid shape in axis %d: %d." % (idx, dim))
* self._shape[idx] = dim # <<<<<<<<<<<<<<
*
* cdef char order
*/
(__pyx_v_self->_shape[__pyx_v_idx]) = __pyx_v_dim;
/* "View.MemoryView":151
*
*
* for idx, dim in enumerate(shape): # <<<<<<<<<<<<<<
* if dim <= 0:
* raise ValueError("Invalid shape in axis %d: %d." % (idx, dim))
*/
}
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
/* "View.MemoryView":157
*
* cdef char order
* if mode == 'fortran': # <<<<<<<<<<<<<<
* order = b'F'
* self.mode = u'fortran'
*/
__pyx_t_4 = (__Pyx_PyString_Equals(__pyx_v_mode, __pyx_n_s_fortran, Py_EQ)); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(1, 157, __pyx_L1_error)
if (__pyx_t_4) {
/* "View.MemoryView":158
* cdef char order
* if mode == 'fortran':
* order = b'F' # <<<<<<<<<<<<<<
* self.mode = u'fortran'
* elif mode == 'c':
*/
__pyx_v_order = 'F';
/* "View.MemoryView":159
* if mode == 'fortran':
* order = b'F'
* self.mode = u'fortran' # <<<<<<<<<<<<<<
* elif mode == 'c':
* order = b'C'
*/
__Pyx_INCREF(__pyx_n_u_fortran);
__Pyx_GIVEREF(__pyx_n_u_fortran);
__Pyx_GOTREF(__pyx_v_self->mode);
__Pyx_DECREF(__pyx_v_self->mode);
__pyx_v_self->mode = __pyx_n_u_fortran;
/* "View.MemoryView":157
*
* cdef char order
* if mode == 'fortran': # <<<<<<<<<<<<<<
* order = b'F'
* self.mode = u'fortran'
*/
goto __pyx_L10;
}
/* "View.MemoryView":160
* order = b'F'
* self.mode = u'fortran'
* elif mode == 'c': # <<<<<<<<<<<<<<
* order = b'C'
* self.mode = u'c'
*/
__pyx_t_4 = (__Pyx_PyString_Equals(__pyx_v_mode, __pyx_n_s_c, Py_EQ)); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(1, 160, __pyx_L1_error)
if (likely(__pyx_t_4)) {
/* "View.MemoryView":161
* self.mode = u'fortran'
* elif mode == 'c':
* order = b'C' # <<<<<<<<<<<<<<
* self.mode = u'c'
* else:
*/
__pyx_v_order = 'C';
/* "View.MemoryView":162
* elif mode == 'c':
* order = b'C'
* self.mode = u'c' # <<<<<<<<<<<<<<
* else:
* raise ValueError("Invalid mode, expected 'c' or 'fortran', got %s" % mode)
*/
__Pyx_INCREF(__pyx_n_u_c);
__Pyx_GIVEREF(__pyx_n_u_c);
__Pyx_GOTREF(__pyx_v_self->mode);
__Pyx_DECREF(__pyx_v_self->mode);
__pyx_v_self->mode = __pyx_n_u_c;
/* "View.MemoryView":160
* order = b'F'
* self.mode = u'fortran'
* elif mode == 'c': # <<<<<<<<<<<<<<
* order = b'C'
* self.mode = u'c'
*/
goto __pyx_L10;
}
/* "View.MemoryView":164
* self.mode = u'c'
* else:
* raise ValueError("Invalid mode, expected 'c' or 'fortran', got %s" % mode) # <<<<<<<<<<<<<<
*
* self.len = fill_contig_strides_array(self._shape, self._strides,
*/
/*else*/ {
__pyx_t_3 = __Pyx_PyString_FormatSafe(__pyx_kp_s_Invalid_mode_expected_c_or_fortr, __pyx_v_mode); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 164, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_10 = __Pyx_PyObject_CallOneArg(__pyx_builtin_ValueError, __pyx_t_3); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 164, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_10);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_Raise(__pyx_t_10, 0, 0, 0);
__Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
__PYX_ERR(1, 164, __pyx_L1_error)
}
__pyx_L10:;
/* "View.MemoryView":166
* raise ValueError("Invalid mode, expected 'c' or 'fortran', got %s" % mode)
*
* self.len = fill_contig_strides_array(self._shape, self._strides, # <<<<<<<<<<<<<<
* itemsize, self.ndim, order)
*
*/
__pyx_v_self->len = __pyx_fill_contig_strides_array(__pyx_v_self->_shape, __pyx_v_self->_strides, __pyx_v_itemsize, __pyx_v_self->ndim, __pyx_v_order);
/* "View.MemoryView":169
* itemsize, self.ndim, order)
*
* self.free_data = allocate_buffer # <<<<<<<<<<<<<<
* self.dtype_is_object = format == b'O'
* if allocate_buffer:
*/
__pyx_v_self->free_data = __pyx_v_allocate_buffer;
/* "View.MemoryView":170
*
* self.free_data = allocate_buffer
* self.dtype_is_object = format == b'O' # <<<<<<<<<<<<<<
* if allocate_buffer:
*
*/
__pyx_t_10 = PyObject_RichCompare(__pyx_v_format, __pyx_n_b_O, Py_EQ); __Pyx_XGOTREF(__pyx_t_10); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 170, __pyx_L1_error)
__pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_10); if (unlikely((__pyx_t_4 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 170, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
__pyx_v_self->dtype_is_object = __pyx_t_4;
/* "View.MemoryView":171
* self.free_data = allocate_buffer
* self.dtype_is_object = format == b'O'
* if allocate_buffer: # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_4 = (__pyx_v_allocate_buffer != 0);
if (__pyx_t_4) {
/* "View.MemoryView":174
*
*
* self.data = <char *>malloc(self.len) # <<<<<<<<<<<<<<
* if not self.data:
* raise MemoryError("unable to allocate array data.")
*/
__pyx_v_self->data = ((char *)malloc(__pyx_v_self->len));
/* "View.MemoryView":175
*
* self.data = <char *>malloc(self.len)
* if not self.data: # <<<<<<<<<<<<<<
* raise MemoryError("unable to allocate array data.")
*
*/
__pyx_t_4 = ((!(__pyx_v_self->data != 0)) != 0);
if (unlikely(__pyx_t_4)) {
/* "View.MemoryView":176
* self.data = <char *>malloc(self.len)
* if not self.data:
* raise MemoryError("unable to allocate array data.") # <<<<<<<<<<<<<<
*
* if self.dtype_is_object:
*/
__pyx_t_10 = __Pyx_PyObject_Call(__pyx_builtin_MemoryError, __pyx_tuple__4, NULL); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 176, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_10);
__Pyx_Raise(__pyx_t_10, 0, 0, 0);
__Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
__PYX_ERR(1, 176, __pyx_L1_error)
/* "View.MemoryView":175
*
* self.data = <char *>malloc(self.len)
* if not self.data: # <<<<<<<<<<<<<<
* raise MemoryError("unable to allocate array data.")
*
*/
}
/* "View.MemoryView":178
* raise MemoryError("unable to allocate array data.")
*
* if self.dtype_is_object: # <<<<<<<<<<<<<<
* p = <PyObject **> self.data
* for i in range(self.len / itemsize):
*/
__pyx_t_4 = (__pyx_v_self->dtype_is_object != 0);
if (__pyx_t_4) {
/* "View.MemoryView":179
*
* if self.dtype_is_object:
* p = <PyObject **> self.data # <<<<<<<<<<<<<<
* for i in range(self.len / itemsize):
* p[i] = Py_None
*/
__pyx_v_p = ((PyObject **)__pyx_v_self->data);
/* "View.MemoryView":180
* if self.dtype_is_object:
* p = <PyObject **> self.data
* for i in range(self.len / itemsize): # <<<<<<<<<<<<<<
* p[i] = Py_None
* Py_INCREF(Py_None)
*/
if (unlikely(__pyx_v_itemsize == 0)) {
PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero");
__PYX_ERR(1, 180, __pyx_L1_error)
}
else if (sizeof(Py_ssize_t) == sizeof(long) && (!(((Py_ssize_t)-1) > 0)) && unlikely(__pyx_v_itemsize == (Py_ssize_t)-1) && unlikely(UNARY_NEG_WOULD_OVERFLOW(__pyx_v_self->len))) {
PyErr_SetString(PyExc_OverflowError, "value too large to perform division");
__PYX_ERR(1, 180, __pyx_L1_error)
}
__pyx_t_1 = (__pyx_v_self->len / __pyx_v_itemsize);
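      /* Number of object slots = len / itemsize; the guards above reject a
         zero itemsize and the one overflowing case of this signed
         division. */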
__pyx_t_9 = __pyx_t_1;
for (__pyx_t_11 = 0; __pyx_t_11 < __pyx_t_9; __pyx_t_11+=1) {
__pyx_v_i = __pyx_t_11;
/* "View.MemoryView":181
* p = <PyObject **> self.data
* for i in range(self.len / itemsize):
* p[i] = Py_None # <<<<<<<<<<<<<<
* Py_INCREF(Py_None)
*
*/
(__pyx_v_p[__pyx_v_i]) = Py_None;
/* "View.MemoryView":182
* for i in range(self.len / itemsize):
* p[i] = Py_None
* Py_INCREF(Py_None) # <<<<<<<<<<<<<<
*
* @cname('getbuffer')
*/
Py_INCREF(Py_None);
}
/* "View.MemoryView":178
* raise MemoryError("unable to allocate array data.")
*
* if self.dtype_is_object: # <<<<<<<<<<<<<<
* p = <PyObject **> self.data
* for i in range(self.len / itemsize):
*/
}
/* "View.MemoryView":171
* self.free_data = allocate_buffer
* self.dtype_is_object = format == b'O'
* if allocate_buffer: # <<<<<<<<<<<<<<
*
*
*/
}
/* "View.MemoryView":122
* cdef bint dtype_is_object
*
* def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, # <<<<<<<<<<<<<<
* mode="c", bint allocate_buffer=True):
*
*/
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_XDECREF(__pyx_t_6);
__Pyx_XDECREF(__pyx_t_10);
__Pyx_AddTraceback("View.MemoryView.array.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_format);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
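/* Illustrative sketch, documentation only: what a contiguous-stride fill
 * such as the fill_contig_strides_array call above is assumed to compute
 * for the _shape/_strides arrays, returning the buffer length in bytes.
 * This states the standard C/Fortran layout rule, not the verbatim
 * Cython utility. */
#if 0
static Py_ssize_t fill_contig_strides_sketch(Py_ssize_t *shape, Py_ssize_t *strides,
                                             Py_ssize_t itemsize, int ndim, char order)
{
    Py_ssize_t stride = itemsize;
    int i;
    if (order == 'F') {                  /* Fortran order: first axis varies fastest */
        for (i = 0; i < ndim; i++) {
            strides[i] = stride;
            stride *= shape[i];
        }
    } else {                             /* C order: last axis varies fastest */
        for (i = ndim - 1; i >= 0; i--) {
            strides[i] = stride;
            stride *= shape[i];
        }
    }
    return stride;                       /* itemsize * product(shape) */
}
#endif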
/* "View.MemoryView":185
*
* @cname('getbuffer')
* def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<<
* cdef int bufmode = -1
* if self.mode == u"c":
*/
/* Python wrapper */
static CYTHON_UNUSED int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/
static CYTHON_UNUSED int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) {
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0);
__pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_2__getbuffer__(((struct __pyx_array_obj *)__pyx_v_self), ((Py_buffer *)__pyx_v_info), ((int)__pyx_v_flags));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_2__getbuffer__(struct __pyx_array_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) {
int __pyx_v_bufmode;
int __pyx_r;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
char *__pyx_t_4;
Py_ssize_t __pyx_t_5;
int __pyx_t_6;
Py_ssize_t *__pyx_t_7;
if (__pyx_v_info == NULL) {
PyErr_SetString(PyExc_BufferError, "PyObject_GetBuffer: view==NULL argument is obsolete");
return -1;
}
__Pyx_RefNannySetupContext("__getbuffer__", 0);
__pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None);
__Pyx_GIVEREF(__pyx_v_info->obj);
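  /* Buffer-protocol contract: info->obj must end up holding a reference
     that keeps the exporter alive for the buffer's lifetime. It is
     initialized to None here; a successful fill is expected to replace
     it with a reference to the exporting object. */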
/* "View.MemoryView":186
* @cname('getbuffer')
* def __getbuffer__(self, Py_buffer *info, int flags):
* cdef int bufmode = -1 # <<<<<<<<<<<<<<
* if self.mode == u"c":
* bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
*/
__pyx_v_bufmode = -1;
/* "View.MemoryView":187
* def __getbuffer__(self, Py_buffer *info, int flags):
* cdef int bufmode = -1
* if self.mode == u"c": # <<<<<<<<<<<<<<
* bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
* elif self.mode == u"fortran":
*/
__pyx_t_1 = (__Pyx_PyUnicode_Equals(__pyx_v_self->mode, __pyx_n_u_c, Py_EQ)); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 187, __pyx_L1_error)
__pyx_t_2 = (__pyx_t_1 != 0);
if (__pyx_t_2) {
/* "View.MemoryView":188
* cdef int bufmode = -1
* if self.mode == u"c":
* bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS # <<<<<<<<<<<<<<
* elif self.mode == u"fortran":
* bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
*/
__pyx_v_bufmode = (PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS);
/* "View.MemoryView":187
* def __getbuffer__(self, Py_buffer *info, int flags):
* cdef int bufmode = -1
* if self.mode == u"c": # <<<<<<<<<<<<<<
* bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
* elif self.mode == u"fortran":
*/
goto __pyx_L3;
}
/* "View.MemoryView":189
* if self.mode == u"c":
* bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
* elif self.mode == u"fortran": # <<<<<<<<<<<<<<
* bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
* if not (flags & bufmode):
*/
__pyx_t_2 = (__Pyx_PyUnicode_Equals(__pyx_v_self->mode, __pyx_n_u_fortran, Py_EQ)); if (unlikely(__pyx_t_2 < 0)) __PYX_ERR(1, 189, __pyx_L1_error)
__pyx_t_1 = (__pyx_t_2 != 0);
if (__pyx_t_1) {
/* "View.MemoryView":190
* bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
* elif self.mode == u"fortran":
* bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS # <<<<<<<<<<<<<<
* if not (flags & bufmode):
* raise ValueError("Can only create a buffer that is contiguous in memory.")
*/
__pyx_v_bufmode = (PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS);
/* "View.MemoryView":189
* if self.mode == u"c":
* bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
* elif self.mode == u"fortran": # <<<<<<<<<<<<<<
* bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
* if not (flags & bufmode):
*/
}
__pyx_L3:;
/* "View.MemoryView":191
* elif self.mode == u"fortran":
* bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
* if not (flags & bufmode): # <<<<<<<<<<<<<<
* raise ValueError("Can only create a buffer that is contiguous in memory.")
* info.buf = self.data
*/
__pyx_t_1 = ((!((__pyx_v_flags & __pyx_v_bufmode) != 0)) != 0);
if (unlikely(__pyx_t_1)) {
/* "View.MemoryView":192
* bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
* if not (flags & bufmode):
* raise ValueError("Can only create a buffer that is contiguous in memory.") # <<<<<<<<<<<<<<
* info.buf = self.data
* info.len = self.len
*/
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__5, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 192, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__PYX_ERR(1, 192, __pyx_L1_error)
/* "View.MemoryView":191
* elif self.mode == u"fortran":
* bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
* if not (flags & bufmode): # <<<<<<<<<<<<<<
* raise ValueError("Can only create a buffer that is contiguous in memory.")
* info.buf = self.data
*/
}
/* "View.MemoryView":193
* if not (flags & bufmode):
* raise ValueError("Can only create a buffer that is contiguous in memory.")
* info.buf = self.data # <<<<<<<<<<<<<<
* info.len = self.len
* info.ndim = self.ndim
*/
__pyx_t_4 = __pyx_v_self->data;
__pyx_v_info->buf = __pyx_t_4;
/* "View.MemoryView":194
* raise ValueError("Can only create a buffer that is contiguous in memory.")
* info.buf = self.data
* info.len = self.len # <<<<<<<<<<<<<<
* info.ndim = self.ndim
* info.shape = self._shape
*/
__pyx_t_5 = __pyx_v_self->len;
__pyx_v_info->len = __pyx_t_5;
/* "View.MemoryView":195
* info.buf = self.data
* info.len = self.len
* info.ndim = self.ndim # <<<<<<<<<<<<<<
* info.shape = self._shape
* info.strides = self._strides
*/
__pyx_t_6 = __pyx_v_self->ndim;
__pyx_v_info->ndim = __pyx_t_6;
/* "View.MemoryView":196
* info.len = self.len
* info.ndim = self.ndim
* info.shape = self._shape # <<<<<<<<<<<<<<
* info.strides = self._strides
* info.suboffsets = NULL
*/
__pyx_t_7 = __pyx_v_self->_shape;
__pyx_v_info->shape = __pyx_t_7;
/* "View.MemoryView":197
* info.ndim = self.ndim
* info.shape = self._shape
* info.strides = self._strides # <<<<<<<<<<<<<<
* info.suboffsets = NULL
* info.itemsize = self.itemsize
*/
__pyx_t_7 = __pyx_v_self->_strides;
__pyx_v_info->strides = __pyx_t_7;
/* "View.MemoryView":198
* info.shape = self._shape
* info.strides = self._strides
* info.suboffsets = NULL # <<<<<<<<<<<<<<
* info.itemsize = self.itemsize
* info.readonly = 0
*/
__pyx_v_info->suboffsets = NULL;
/* "View.MemoryView":199
* info.strides = self._strides
* info.suboffsets = NULL
* info.itemsize = self.itemsize # <<<<<<<<<<<<<<
* info.readonly = 0
*
*/
__pyx_t_5 = __pyx_v_self->itemsize;
__pyx_v_info->itemsize = __pyx_t_5;
/* "View.MemoryView":200
* info.suboffsets = NULL
* info.itemsize = self.itemsize
* info.readonly = 0 # <<<<<<<<<<<<<<
*
* if flags & PyBUF_FORMAT:
*/
__pyx_v_info->readonly = 0;
/* "View.MemoryView":202
* info.readonly = 0
*
* if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<<
* info.format = self.format
* else:
*/
__pyx_t_1 = ((__pyx_v_flags & PyBUF_FORMAT) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":203
*
* if flags & PyBUF_FORMAT:
* info.format = self.format # <<<<<<<<<<<<<<
* else:
* info.format = NULL
*/
__pyx_t_4 = __pyx_v_self->format;
__pyx_v_info->format = __pyx_t_4;
/* "View.MemoryView":202
* info.readonly = 0
*
* if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<<
* info.format = self.format
* else:
*/
goto __pyx_L5;
}
/* "View.MemoryView":205
* info.format = self.format
* else:
* info.format = NULL # <<<<<<<<<<<<<<
*
* info.obj = self
*/
/*else*/ {
__pyx_v_info->format = NULL;
}
__pyx_L5:;
/* "View.MemoryView":207
* info.format = NULL
*
* info.obj = self # <<<<<<<<<<<<<<
*
* __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)")
*/
__Pyx_INCREF(((PyObject *)__pyx_v_self));
__Pyx_GIVEREF(((PyObject *)__pyx_v_self));
__Pyx_GOTREF(__pyx_v_info->obj);
__Pyx_DECREF(__pyx_v_info->obj);
__pyx_v_info->obj = ((PyObject *)__pyx_v_self);
/* "View.MemoryView":185
*
* @cname('getbuffer')
* def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<<
* cdef int bufmode = -1
* if self.mode == u"c":
*/
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView.array.__getbuffer__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
if (__pyx_v_info->obj != NULL) {
__Pyx_GOTREF(__pyx_v_info->obj);
__Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0;
}
goto __pyx_L2;
__pyx_L0:;
if (__pyx_v_info->obj == Py_None) {
__Pyx_GOTREF(__pyx_v_info->obj);
__Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0;
}
__pyx_L2:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
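/*
 * Illustrative sketch: how a consumer would drive the __getbuffer__ slot
 * above through the stock CPython buffer protocol. demo_sum_doubles is a
 * hypothetical helper, not part of the generated module; it assumes the
 * exporter holds C-contiguous doubles (mode "c", format "d").
 */
static double demo_sum_doubles(PyObject *exporter)
{
    Py_buffer view;
    double total = 0.0;
    Py_ssize_t i, n;
    /* Request a C-contiguous view with format information; this ends up
       in __getbuffer__ above, which checks flags against bufmode. */
    if (PyObject_GetBuffer(exporter, &view, PyBUF_C_CONTIGUOUS | PyBUF_FORMAT) < 0)
        return -1.0; /* exception already set by the exporter */
    n = view.len / view.itemsize;
    for (i = 0; i < n; i++)
        total += ((double *)view.buf)[i];
    PyBuffer_Release(&view); /* drops the reference stored in view.obj */
    return total;
}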
/* "View.MemoryView":211
* __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)")
*
* def __dealloc__(array self): # <<<<<<<<<<<<<<
* if self.callback_free_data != NULL:
* self.callback_free_data(self.data)
*/
/* Python wrapper */
static void __pyx_array___dealloc__(PyObject *__pyx_v_self); /*proto*/
static void __pyx_array___dealloc__(PyObject *__pyx_v_self) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0);
__pyx_array___pyx_pf_15View_dot_MemoryView_5array_4__dealloc__(((struct __pyx_array_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
}
static void __pyx_array___pyx_pf_15View_dot_MemoryView_5array_4__dealloc__(struct __pyx_array_obj *__pyx_v_self) {
__Pyx_RefNannyDeclarations
int __pyx_t_1;
__Pyx_RefNannySetupContext("__dealloc__", 0);
/* "View.MemoryView":212
*
* def __dealloc__(array self):
* if self.callback_free_data != NULL: # <<<<<<<<<<<<<<
* self.callback_free_data(self.data)
* elif self.free_data:
*/
__pyx_t_1 = ((__pyx_v_self->callback_free_data != NULL) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":213
* def __dealloc__(array self):
* if self.callback_free_data != NULL:
* self.callback_free_data(self.data) # <<<<<<<<<<<<<<
* elif self.free_data:
* if self.dtype_is_object:
*/
__pyx_v_self->callback_free_data(__pyx_v_self->data);
/* "View.MemoryView":212
*
* def __dealloc__(array self):
* if self.callback_free_data != NULL: # <<<<<<<<<<<<<<
* self.callback_free_data(self.data)
* elif self.free_data:
*/
goto __pyx_L3;
}
/* "View.MemoryView":214
* if self.callback_free_data != NULL:
* self.callback_free_data(self.data)
* elif self.free_data: # <<<<<<<<<<<<<<
* if self.dtype_is_object:
* refcount_objects_in_slice(self.data, self._shape,
*/
__pyx_t_1 = (__pyx_v_self->free_data != 0);
if (__pyx_t_1) {
/* "View.MemoryView":215
* self.callback_free_data(self.data)
* elif self.free_data:
* if self.dtype_is_object: # <<<<<<<<<<<<<<
* refcount_objects_in_slice(self.data, self._shape,
* self._strides, self.ndim, False)
*/
__pyx_t_1 = (__pyx_v_self->dtype_is_object != 0);
if (__pyx_t_1) {
/* "View.MemoryView":216
* elif self.free_data:
* if self.dtype_is_object:
* refcount_objects_in_slice(self.data, self._shape, # <<<<<<<<<<<<<<
* self._strides, self.ndim, False)
* free(self.data)
*/
__pyx_memoryview_refcount_objects_in_slice(__pyx_v_self->data, __pyx_v_self->_shape, __pyx_v_self->_strides, __pyx_v_self->ndim, 0);
/* "View.MemoryView":215
* self.callback_free_data(self.data)
* elif self.free_data:
* if self.dtype_is_object: # <<<<<<<<<<<<<<
* refcount_objects_in_slice(self.data, self._shape,
* self._strides, self.ndim, False)
*/
}
/* "View.MemoryView":218
* refcount_objects_in_slice(self.data, self._shape,
* self._strides, self.ndim, False)
* free(self.data) # <<<<<<<<<<<<<<
* PyObject_Free(self._shape)
*
*/
free(__pyx_v_self->data);
/* "View.MemoryView":214
* if self.callback_free_data != NULL:
* self.callback_free_data(self.data)
* elif self.free_data: # <<<<<<<<<<<<<<
* if self.dtype_is_object:
* refcount_objects_in_slice(self.data, self._shape,
*/
}
__pyx_L3:;
/* "View.MemoryView":219
* self._strides, self.ndim, False)
* free(self.data)
* PyObject_Free(self._shape) # <<<<<<<<<<<<<<
*
* @property
*/
PyObject_Free(__pyx_v_self->_shape);
/* "View.MemoryView":211
* __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)")
*
* def __dealloc__(array self): # <<<<<<<<<<<<<<
* if self.callback_free_data != NULL:
* self.callback_free_data(self.data)
*/
/* function exit code */
__Pyx_RefNannyFinishContext();
}
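/*
 * The destructor above releases the payload along one of three paths: a
 * user-supplied callback_free_data wins if set; otherwise free_data
 * (meaning __cinit__ allocated the buffer) triggers a plain free(),
 * preceded by a reference-count sweep when the elements are objects;
 * otherwise the buffer is borrowed (see array_cwrapper below) and is
 * left untouched. The _shape allocation is freed unconditionally.
 */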
/* "View.MemoryView":222
*
* @property
* def memview(self): # <<<<<<<<<<<<<<
* return self.get_memview()
*
*/
/* Python wrapper */
static PyObject *__pyx_pw_15View_dot_MemoryView_5array_7memview_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_5array_7memview_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_15View_dot_MemoryView_5array_7memview___get__(((struct __pyx_array_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_15View_dot_MemoryView_5array_7memview___get__(struct __pyx_array_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":223
* @property
* def memview(self):
* return self.get_memview() # <<<<<<<<<<<<<<
*
* @cname('get_memview')
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = ((struct __pyx_vtabstruct_array *)__pyx_v_self->__pyx_vtab)->get_memview(__pyx_v_self); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 223, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "View.MemoryView":222
*
* @property
* def memview(self): # <<<<<<<<<<<<<<
* return self.get_memview()
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.array.memview.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":226
*
* @cname('get_memview')
* cdef get_memview(self): # <<<<<<<<<<<<<<
* flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE
* return memoryview(self, flags, self.dtype_is_object)
*/
static PyObject *__pyx_array_get_memview(struct __pyx_array_obj *__pyx_v_self) {
int __pyx_v_flags;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
__Pyx_RefNannySetupContext("get_memview", 0);
/* "View.MemoryView":227
* @cname('get_memview')
* cdef get_memview(self):
* flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE # <<<<<<<<<<<<<<
* return memoryview(self, flags, self.dtype_is_object)
*
*/
__pyx_v_flags = ((PyBUF_ANY_CONTIGUOUS | PyBUF_FORMAT) | PyBUF_WRITABLE);
/* "View.MemoryView":228
* cdef get_memview(self):
* flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE
* return memoryview(self, flags, self.dtype_is_object) # <<<<<<<<<<<<<<
*
* def __len__(self):
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_flags); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 228, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_v_self->dtype_is_object); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 228, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 228, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_INCREF(((PyObject *)__pyx_v_self));
__Pyx_GIVEREF(((PyObject *)__pyx_v_self));
PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)__pyx_v_self));
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_1);
__Pyx_GIVEREF(__pyx_t_2);
PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2);
__pyx_t_1 = 0;
__pyx_t_2 = 0;
__pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryview_type), __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 228, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
/* "View.MemoryView":226
*
* @cname('get_memview')
* cdef get_memview(self): # <<<<<<<<<<<<<<
* flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE
* return memoryview(self, flags, self.dtype_is_object)
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView.array.get_memview", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
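/*
 * get_memview wraps the array in a View.MemoryView memoryview,
 * requesting any contiguous layout plus format data and write access
 * (PyBUF_ANY_CONTIGUOUS | PyBUF_FORMAT | PyBUF_WRITABLE), and
 * propagating dtype_is_object so object elements keep correct reference
 * counts inside the view.
 */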
/* "View.MemoryView":230
* return memoryview(self, flags, self.dtype_is_object)
*
* def __len__(self): # <<<<<<<<<<<<<<
* return self._shape[0]
*
*/
/* Python wrapper */
static Py_ssize_t __pyx_array___len__(PyObject *__pyx_v_self); /*proto*/
static Py_ssize_t __pyx_array___len__(PyObject *__pyx_v_self) {
Py_ssize_t __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__len__ (wrapper)", 0);
__pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_6__len__(((struct __pyx_array_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static Py_ssize_t __pyx_array___pyx_pf_15View_dot_MemoryView_5array_6__len__(struct __pyx_array_obj *__pyx_v_self) {
Py_ssize_t __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__len__", 0);
/* "View.MemoryView":231
*
* def __len__(self):
* return self._shape[0] # <<<<<<<<<<<<<<
*
* def __getattr__(self, attr):
*/
__pyx_r = (__pyx_v_self->_shape[0]);
goto __pyx_L0;
/* "View.MemoryView":230
* return memoryview(self, flags, self.dtype_is_object)
*
* def __len__(self): # <<<<<<<<<<<<<<
* return self._shape[0]
*
*/
/* function exit code */
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":233
* return self._shape[0]
*
* def __getattr__(self, attr): # <<<<<<<<<<<<<<
* return getattr(self.memview, attr)
*
*/
/* Python wrapper */
static PyObject *__pyx_array___getattr__(PyObject *__pyx_v_self, PyObject *__pyx_v_attr); /*proto*/
static PyObject *__pyx_array___getattr__(PyObject *__pyx_v_self, PyObject *__pyx_v_attr) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__getattr__ (wrapper)", 0);
__pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_8__getattr__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v_attr));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_8__getattr__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_attr) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
__Pyx_RefNannySetupContext("__getattr__", 0);
/* "View.MemoryView":234
*
* def __getattr__(self, attr):
* return getattr(self.memview, attr) # <<<<<<<<<<<<<<
*
* def __getitem__(self, item):
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_memview); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 234, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = __Pyx_GetAttr(__pyx_t_1, __pyx_v_attr); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 234, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
/* "View.MemoryView":233
* return self._shape[0]
*
* def __getattr__(self, attr): # <<<<<<<<<<<<<<
* return getattr(self.memview, attr)
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_AddTraceback("View.MemoryView.array.__getattr__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
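/*
 * __getattr__ above, together with __getitem__ and __setitem__ that
 * follow, all delegate to the memview property, so an array behaves like
 * the memoryview that wraps it for any attribute or subscript the array
 * type does not define itself.
 */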
/* "View.MemoryView":236
* return getattr(self.memview, attr)
*
* def __getitem__(self, item): # <<<<<<<<<<<<<<
* return self.memview[item]
*
*/
/* Python wrapper */
static PyObject *__pyx_array___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item); /*proto*/
static PyObject *__pyx_array___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__getitem__ (wrapper)", 0);
__pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_10__getitem__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v_item));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_10__getitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
__Pyx_RefNannySetupContext("__getitem__", 0);
/* "View.MemoryView":237
*
* def __getitem__(self, item):
* return self.memview[item] # <<<<<<<<<<<<<<
*
* def __setitem__(self, item, value):
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_memview); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 237, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = __Pyx_PyObject_GetItem(__pyx_t_1, __pyx_v_item); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 237, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
/* "View.MemoryView":236
* return getattr(self.memview, attr)
*
* def __getitem__(self, item): # <<<<<<<<<<<<<<
* return self.memview[item]
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_AddTraceback("View.MemoryView.array.__getitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":239
* return self.memview[item]
*
* def __setitem__(self, item, value): # <<<<<<<<<<<<<<
* self.memview[item] = value
*
*/
/* Python wrapper */
static int __pyx_array___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value); /*proto*/
static int __pyx_array___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value) {
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0);
__pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_12__setitem__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v_item), ((PyObject *)__pyx_v_value));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_12__setitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value) {
int __pyx_r;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
__Pyx_RefNannySetupContext("__setitem__", 0);
/* "View.MemoryView":240
*
* def __setitem__(self, item, value):
* self.memview[item] = value # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_memview); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 240, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
if (unlikely(PyObject_SetItem(__pyx_t_1, __pyx_v_item, __pyx_v_value) < 0)) __PYX_ERR(1, 240, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "View.MemoryView":239
* return self.memview[item]
*
* def __setitem__(self, item, value): # <<<<<<<<<<<<<<
* self.memview[item] = value
*
*/
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.array.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "(tree fragment)":1
* def __reduce_cython__(self): # <<<<<<<<<<<<<<
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state):
*/
/* Python wrapper */
static PyObject *__pyx_pw___pyx_array_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
static PyObject *__pyx_pw___pyx_array_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
__pyx_r = __pyx_pf___pyx_array___reduce_cython__(((struct __pyx_array_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf___pyx_array___reduce_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
__Pyx_RefNannySetupContext("__reduce_cython__", 0);
/* "(tree fragment)":2
* def __reduce_cython__(self):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<<
* def __setstate_cython__(self, __pyx_state):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
*/
__pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__6, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 2, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_Raise(__pyx_t_1, 0, 0, 0);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__PYX_ERR(1, 2, __pyx_L1_error)
/* "(tree fragment)":1
* def __reduce_cython__(self): # <<<<<<<<<<<<<<
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state):
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.array.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "(tree fragment)":3
* def __reduce_cython__(self):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<<
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
*/
/* Python wrapper */
static PyObject *__pyx_pw___pyx_array_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/
static PyObject *__pyx_pw___pyx_array_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
__pyx_r = __pyx_pf___pyx_array_2__setstate_cython__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf___pyx_array_2__setstate_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
__Pyx_RefNannySetupContext("__setstate_cython__", 0);
/* "(tree fragment)":4
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<<
*/
__pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__7, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 4, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_Raise(__pyx_t_1, 0, 0, 0);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__PYX_ERR(1, 4, __pyx_L1_error)
/* "(tree fragment)":3
* def __reduce_cython__(self):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<<
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.array.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
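/*
 * Pickling is deliberately disabled for array: __cinit__ takes required
 * constructor arguments, so Cython emits __reduce_cython__ and
 * __setstate_cython__ stubs that raise TypeError rather than guess a
 * reconstruction protocol for the raw buffer.
 */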
/* "View.MemoryView":244
*
* @cname("__pyx_array_new")
* cdef array array_cwrapper(tuple shape, Py_ssize_t itemsize, char *format, # <<<<<<<<<<<<<<
* char *mode, char *buf):
* cdef array result
*/
static struct __pyx_array_obj *__pyx_array_new(PyObject *__pyx_v_shape, Py_ssize_t __pyx_v_itemsize, char *__pyx_v_format, char *__pyx_v_mode, char *__pyx_v_buf) {
struct __pyx_array_obj *__pyx_v_result = 0;
struct __pyx_array_obj *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
__Pyx_RefNannySetupContext("array_cwrapper", 0);
/* "View.MemoryView":248
* cdef array result
*
* if buf == NULL: # <<<<<<<<<<<<<<
* result = array(shape, itemsize, format, mode.decode('ASCII'))
* else:
*/
__pyx_t_1 = ((__pyx_v_buf == NULL) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":249
*
* if buf == NULL:
* result = array(shape, itemsize, format, mode.decode('ASCII')) # <<<<<<<<<<<<<<
* else:
* result = array(shape, itemsize, format, mode.decode('ASCII'),
*/
__pyx_t_2 = PyInt_FromSsize_t(__pyx_v_itemsize); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 249, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = __Pyx_PyBytes_FromString(__pyx_v_format); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 249, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = __Pyx_decode_c_string(__pyx_v_mode, 0, strlen(__pyx_v_mode), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 249, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_5 = PyTuple_New(4); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 249, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_INCREF(__pyx_v_shape);
__Pyx_GIVEREF(__pyx_v_shape);
PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_v_shape);
__Pyx_GIVEREF(__pyx_t_2);
PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_2);
__Pyx_GIVEREF(__pyx_t_3);
PyTuple_SET_ITEM(__pyx_t_5, 2, __pyx_t_3);
__Pyx_GIVEREF(__pyx_t_4);
PyTuple_SET_ITEM(__pyx_t_5, 3, __pyx_t_4);
__pyx_t_2 = 0;
__pyx_t_3 = 0;
__pyx_t_4 = 0;
__pyx_t_4 = __Pyx_PyObject_Call(((PyObject *)__pyx_array_type), __pyx_t_5, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 249, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__pyx_v_result = ((struct __pyx_array_obj *)__pyx_t_4);
__pyx_t_4 = 0;
/* "View.MemoryView":248
* cdef array result
*
* if buf == NULL: # <<<<<<<<<<<<<<
* result = array(shape, itemsize, format, mode.decode('ASCII'))
* else:
*/
goto __pyx_L3;
}
/* "View.MemoryView":251
* result = array(shape, itemsize, format, mode.decode('ASCII'))
* else:
* result = array(shape, itemsize, format, mode.decode('ASCII'), # <<<<<<<<<<<<<<
* allocate_buffer=False)
* result.data = buf
*/
/*else*/ {
__pyx_t_4 = PyInt_FromSsize_t(__pyx_v_itemsize); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 251, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_5 = __Pyx_PyBytes_FromString(__pyx_v_format); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 251, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_3 = __Pyx_decode_c_string(__pyx_v_mode, 0, strlen(__pyx_v_mode), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 251, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_2 = PyTuple_New(4); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 251, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_INCREF(__pyx_v_shape);
__Pyx_GIVEREF(__pyx_v_shape);
PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_shape);
__Pyx_GIVEREF(__pyx_t_4);
PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_t_4);
__Pyx_GIVEREF(__pyx_t_5);
PyTuple_SET_ITEM(__pyx_t_2, 2, __pyx_t_5);
__Pyx_GIVEREF(__pyx_t_3);
PyTuple_SET_ITEM(__pyx_t_2, 3, __pyx_t_3);
__pyx_t_4 = 0;
__pyx_t_5 = 0;
__pyx_t_3 = 0;
/* "View.MemoryView":252
* else:
* result = array(shape, itemsize, format, mode.decode('ASCII'),
* allocate_buffer=False) # <<<<<<<<<<<<<<
* result.data = buf
*
*/
__pyx_t_3 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 252, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_allocate_buffer, Py_False) < 0) __PYX_ERR(1, 252, __pyx_L1_error)
/* "View.MemoryView":251
* result = array(shape, itemsize, format, mode.decode('ASCII'))
* else:
* result = array(shape, itemsize, format, mode.decode('ASCII'), # <<<<<<<<<<<<<<
* allocate_buffer=False)
* result.data = buf
*/
__pyx_t_5 = __Pyx_PyObject_Call(((PyObject *)__pyx_array_type), __pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 251, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_v_result = ((struct __pyx_array_obj *)__pyx_t_5);
__pyx_t_5 = 0;
/* "View.MemoryView":253
* result = array(shape, itemsize, format, mode.decode('ASCII'),
* allocate_buffer=False)
* result.data = buf # <<<<<<<<<<<<<<
*
* return result
*/
__pyx_v_result->data = __pyx_v_buf;
}
__pyx_L3:;
/* "View.MemoryView":255
* result.data = buf
*
* return result # <<<<<<<<<<<<<<
*
*
*/
__Pyx_XDECREF(((PyObject *)__pyx_r));
__Pyx_INCREF(((PyObject *)__pyx_v_result));
__pyx_r = __pyx_v_result;
goto __pyx_L0;
/* "View.MemoryView":244
*
* @cname("__pyx_array_new")
* cdef array array_cwrapper(tuple shape, Py_ssize_t itemsize, char *format, # <<<<<<<<<<<<<<
* char *mode, char *buf):
* cdef array result
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_AddTraceback("View.MemoryView.array_cwrapper", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XDECREF((PyObject *)__pyx_v_result);
__Pyx_XGIVEREF((PyObject *)__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
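/*
 * Illustrative sketch: array_cwrapper (exposed as __pyx_array_new) either
 * lets the array allocate its own storage (buf == NULL) or wraps an
 * existing buffer without copying (allocate_buffer=False, data = buf).
 * demo_wrap_doubles is a hypothetical caller, not part of the module;
 * the wrapped storage must outlive the returned array, since __dealloc__
 * will not free a borrowed buffer.
 */
static struct __pyx_array_obj *demo_wrap_doubles(double *buf, Py_ssize_t n)
{
    struct __pyx_array_obj *arr;
    PyObject *shape = Py_BuildValue("(n)", n); /* 1-D shape tuple */
    if (!shape)
        return NULL;
    /* mode "c": C-contiguous; format "d": double; buf is borrowed. */
    arr = __pyx_array_new(shape, (Py_ssize_t)sizeof(double),
                          (char *)"d", (char *)"c", (char *)buf);
    Py_DECREF(shape);
    return arr; /* new reference, or NULL with an exception set */
}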
/* "View.MemoryView":281
* cdef class Enum(object):
* cdef object name
* def __init__(self, name): # <<<<<<<<<<<<<<
* self.name = name
* def __repr__(self):
*/
/* Python wrapper */
static int __pyx_MemviewEnum___init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_MemviewEnum___init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
PyObject *__pyx_v_name = 0;
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__init__ (wrapper)", 0);
{
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_name,0};
PyObject* values[1] = {0};
if (unlikely(__pyx_kwds)) {
Py_ssize_t kw_args;
const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
switch (pos_args) {
case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
CYTHON_FALLTHROUGH;
case 0: break;
default: goto __pyx_L5_argtuple_error;
}
kw_args = PyDict_Size(__pyx_kwds);
switch (pos_args) {
case 0:
if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_name)) != 0)) kw_args--;
else goto __pyx_L5_argtuple_error;
}
if (unlikely(kw_args > 0)) {
if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__init__") < 0)) __PYX_ERR(1, 281, __pyx_L3_error)
}
} else if (PyTuple_GET_SIZE(__pyx_args) != 1) {
goto __pyx_L5_argtuple_error;
} else {
values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
}
__pyx_v_name = values[0];
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
__Pyx_RaiseArgtupleInvalid("__init__", 1, 1, 1, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 281, __pyx_L3_error)
__pyx_L3_error:;
__Pyx_AddTraceback("View.MemoryView.Enum.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
return -1;
__pyx_L4_argument_unpacking_done:;
__pyx_r = __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum___init__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self), __pyx_v_name);
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static int __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum___init__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v_name) {
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__init__", 0);
/* "View.MemoryView":282
* cdef object name
* def __init__(self, name):
* self.name = name # <<<<<<<<<<<<<<
* def __repr__(self):
* return self.name
*/
__Pyx_INCREF(__pyx_v_name);
__Pyx_GIVEREF(__pyx_v_name);
__Pyx_GOTREF(__pyx_v_self->name);
__Pyx_DECREF(__pyx_v_self->name);
__pyx_v_self->name = __pyx_v_name;
/* "View.MemoryView":281
* cdef class Enum(object):
* cdef object name
* def __init__(self, name): # <<<<<<<<<<<<<<
* self.name = name
* def __repr__(self):
*/
/* function exit code */
__pyx_r = 0;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":283
* def __init__(self, name):
* self.name = name
* def __repr__(self): # <<<<<<<<<<<<<<
* return self.name
*
*/
/* Python wrapper */
static PyObject *__pyx_MemviewEnum___repr__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_MemviewEnum___repr__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__repr__ (wrapper)", 0);
__pyx_r = __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum_2__repr__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum_2__repr__(struct __pyx_MemviewEnum_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__repr__", 0);
/* "View.MemoryView":284
* self.name = name
* def __repr__(self):
* return self.name # <<<<<<<<<<<<<<
*
* cdef generic = Enum("<strided and direct or indirect>")
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(__pyx_v_self->name);
__pyx_r = __pyx_v_self->name;
goto __pyx_L0;
/* "View.MemoryView":283
* def __init__(self, name):
* self.name = name
* def __repr__(self): # <<<<<<<<<<<<<<
* return self.name
*
*/
/* function exit code */
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "(tree fragment)":1
* def __reduce_cython__(self): # <<<<<<<<<<<<<<
* cdef tuple state
* cdef object _dict
*/
/* Python wrapper */
static PyObject *__pyx_pw___pyx_MemviewEnum_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
static PyObject *__pyx_pw___pyx_MemviewEnum_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
__pyx_r = __pyx_pf___pyx_MemviewEnum___reduce_cython__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf___pyx_MemviewEnum___reduce_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self) {
PyObject *__pyx_v_state = 0;
PyObject *__pyx_v__dict = 0;
int __pyx_v_use_setstate;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_t_2;
int __pyx_t_3;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
__Pyx_RefNannySetupContext("__reduce_cython__", 0);
/* "(tree fragment)":5
* cdef object _dict
* cdef bint use_setstate
* state = (self.name,) # <<<<<<<<<<<<<<
* _dict = getattr(self, '__dict__', None)
* if _dict is not None:
*/
__pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 5, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_INCREF(__pyx_v_self->name);
__Pyx_GIVEREF(__pyx_v_self->name);
PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_self->name);
__pyx_v_state = ((PyObject*)__pyx_t_1);
__pyx_t_1 = 0;
/* "(tree fragment)":6
* cdef bint use_setstate
* state = (self.name,)
* _dict = getattr(self, '__dict__', None) # <<<<<<<<<<<<<<
* if _dict is not None:
* state += (_dict,)
*/
__pyx_t_1 = __Pyx_GetAttr3(((PyObject *)__pyx_v_self), __pyx_n_s_dict, Py_None); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 6, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_v__dict = __pyx_t_1;
__pyx_t_1 = 0;
/* "(tree fragment)":7
* state = (self.name,)
* _dict = getattr(self, '__dict__', None)
* if _dict is not None: # <<<<<<<<<<<<<<
* state += (_dict,)
* use_setstate = True
*/
__pyx_t_2 = (__pyx_v__dict != Py_None);
__pyx_t_3 = (__pyx_t_2 != 0);
if (__pyx_t_3) {
/* "(tree fragment)":8
* _dict = getattr(self, '__dict__', None)
* if _dict is not None:
* state += (_dict,) # <<<<<<<<<<<<<<
* use_setstate = True
* else:
*/
__pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 8, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_INCREF(__pyx_v__dict);
__Pyx_GIVEREF(__pyx_v__dict);
PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v__dict);
__pyx_t_4 = PyNumber_InPlaceAdd(__pyx_v_state, __pyx_t_1); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 8, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_DECREF_SET(__pyx_v_state, ((PyObject*)__pyx_t_4));
__pyx_t_4 = 0;
/* "(tree fragment)":9
* if _dict is not None:
* state += (_dict,)
* use_setstate = True # <<<<<<<<<<<<<<
* else:
* use_setstate = self.name is not None
*/
__pyx_v_use_setstate = 1;
/* "(tree fragment)":7
* state = (self.name,)
* _dict = getattr(self, '__dict__', None)
* if _dict is not None: # <<<<<<<<<<<<<<
* state += (_dict,)
* use_setstate = True
*/
goto __pyx_L3;
}
/* "(tree fragment)":11
* use_setstate = True
* else:
* use_setstate = self.name is not None # <<<<<<<<<<<<<<
* if use_setstate:
* return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state
*/
/*else*/ {
__pyx_t_3 = (__pyx_v_self->name != Py_None);
__pyx_v_use_setstate = __pyx_t_3;
}
__pyx_L3:;
/* "(tree fragment)":12
* else:
* use_setstate = self.name is not None
* if use_setstate: # <<<<<<<<<<<<<<
* return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state
* else:
*/
__pyx_t_3 = (__pyx_v_use_setstate != 0);
if (__pyx_t_3) {
/* "(tree fragment)":13
* use_setstate = self.name is not None
* if use_setstate:
* return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state # <<<<<<<<<<<<<<
* else:
* return __pyx_unpickle_Enum, (type(self), 0xb068931, state)
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_n_s_pyx_unpickle_Enum); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 13, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_1 = PyTuple_New(3); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 13, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_INCREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
__Pyx_GIVEREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
__Pyx_INCREF(__pyx_int_184977713);
__Pyx_GIVEREF(__pyx_int_184977713);
PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_int_184977713);
__Pyx_INCREF(Py_None);
__Pyx_GIVEREF(Py_None);
PyTuple_SET_ITEM(__pyx_t_1, 2, Py_None);
__pyx_t_5 = PyTuple_New(3); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 13, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_GIVEREF(__pyx_t_4);
PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_4);
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_1);
__Pyx_INCREF(__pyx_v_state);
__Pyx_GIVEREF(__pyx_v_state);
PyTuple_SET_ITEM(__pyx_t_5, 2, __pyx_v_state);
__pyx_t_4 = 0;
__pyx_t_1 = 0;
__pyx_r = __pyx_t_5;
__pyx_t_5 = 0;
goto __pyx_L0;
/* "(tree fragment)":12
* else:
* use_setstate = self.name is not None
* if use_setstate: # <<<<<<<<<<<<<<
* return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state
* else:
*/
}
/* "(tree fragment)":15
* return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state
* else:
* return __pyx_unpickle_Enum, (type(self), 0xb068931, state) # <<<<<<<<<<<<<<
* def __setstate_cython__(self, __pyx_state):
* __pyx_unpickle_Enum__set_state(self, __pyx_state)
*/
/*else*/ {
__Pyx_XDECREF(__pyx_r);
__Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_n_s_pyx_unpickle_Enum); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 15, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_1 = PyTuple_New(3); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 15, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_INCREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
__Pyx_GIVEREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
__Pyx_INCREF(__pyx_int_184977713);
__Pyx_GIVEREF(__pyx_int_184977713);
PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_int_184977713);
__Pyx_INCREF(__pyx_v_state);
__Pyx_GIVEREF(__pyx_v_state);
PyTuple_SET_ITEM(__pyx_t_1, 2, __pyx_v_state);
__pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 15, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_GIVEREF(__pyx_t_5);
PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_5);
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_1);
__pyx_t_5 = 0;
__pyx_t_1 = 0;
__pyx_r = __pyx_t_4;
__pyx_t_4 = 0;
goto __pyx_L0;
}
/* "(tree fragment)":1
* def __reduce_cython__(self): # <<<<<<<<<<<<<<
* cdef tuple state
* cdef object _dict
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_AddTraceback("View.MemoryView.Enum.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_state);
__Pyx_XDECREF(__pyx_v__dict);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
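/*
 * The reducer above follows Cython's auto-pickle scheme: it returns
 * (__pyx_unpickle_Enum, (type(self), 0xb068931, ...), state) where
 * 0xb068931 (184977713 decimal, interned here as __pyx_int_184977713)
 * is a checksum over the type's fields, letting the unpickler reject
 * state produced by an incompatible class definition. The three-element
 * variant with a trailing state tuple is chosen whenever an instance
 * __dict__ exists or the name is set, so __setstate__ must run.
 */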
/* "(tree fragment)":16
* else:
* return __pyx_unpickle_Enum, (type(self), 0xb068931, state)
* def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<<
* __pyx_unpickle_Enum__set_state(self, __pyx_state)
*/
/* Python wrapper */
static PyObject *__pyx_pw___pyx_MemviewEnum_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/
static PyObject *__pyx_pw___pyx_MemviewEnum_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
__pyx_r = __pyx_pf___pyx_MemviewEnum_2__setstate_cython__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf___pyx_MemviewEnum_2__setstate_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v___pyx_state) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
__Pyx_RefNannySetupContext("__setstate_cython__", 0);
/* "(tree fragment)":17
* return __pyx_unpickle_Enum, (type(self), 0xb068931, state)
* def __setstate_cython__(self, __pyx_state):
* __pyx_unpickle_Enum__set_state(self, __pyx_state) # <<<<<<<<<<<<<<
*/
if (!(likely(PyTuple_CheckExact(__pyx_v___pyx_state))||((__pyx_v___pyx_state) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "tuple", Py_TYPE(__pyx_v___pyx_state)->tp_name), 0))) __PYX_ERR(1, 17, __pyx_L1_error)
__pyx_t_1 = __pyx_unpickle_Enum__set_state(__pyx_v_self, ((PyObject*)__pyx_v___pyx_state)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 17, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "(tree fragment)":16
* else:
* return __pyx_unpickle_Enum, (type(self), 0xb068931, state)
* def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<<
* __pyx_unpickle_Enum__set_state(self, __pyx_state)
*/
/* function exit code */
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.Enum.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":298
*
* @cname('__pyx_align_pointer')
* cdef void *align_pointer(void *memory, size_t alignment) nogil: # <<<<<<<<<<<<<<
* "Align pointer memory on a given boundary"
* cdef Py_intptr_t aligned_p = <Py_intptr_t> memory
*/
static void *__pyx_align_pointer(void *__pyx_v_memory, size_t __pyx_v_alignment) {
Py_intptr_t __pyx_v_aligned_p;
size_t __pyx_v_offset;
void *__pyx_r;
int __pyx_t_1;
/* "View.MemoryView":300
* cdef void *align_pointer(void *memory, size_t alignment) nogil:
* "Align pointer memory on a given boundary"
* cdef Py_intptr_t aligned_p = <Py_intptr_t> memory # <<<<<<<<<<<<<<
* cdef size_t offset
*
*/
__pyx_v_aligned_p = ((Py_intptr_t)__pyx_v_memory);
/* "View.MemoryView":304
*
* with cython.cdivision(True):
* offset = aligned_p % alignment # <<<<<<<<<<<<<<
*
* if offset > 0:
*/
__pyx_v_offset = (__pyx_v_aligned_p % __pyx_v_alignment);
/* "View.MemoryView":306
* offset = aligned_p % alignment
*
* if offset > 0: # <<<<<<<<<<<<<<
* aligned_p += alignment - offset
*
*/
__pyx_t_1 = ((__pyx_v_offset > 0) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":307
*
* if offset > 0:
* aligned_p += alignment - offset # <<<<<<<<<<<<<<
*
* return <void *> aligned_p
*/
__pyx_v_aligned_p = (__pyx_v_aligned_p + (__pyx_v_alignment - __pyx_v_offset));
/* "View.MemoryView":306
* offset = aligned_p % alignment
*
* if offset > 0: # <<<<<<<<<<<<<<
* aligned_p += alignment - offset
*
*/
}
/* "View.MemoryView":309
* aligned_p += alignment - offset
*
* return <void *> aligned_p # <<<<<<<<<<<<<<
*
*
*/
__pyx_r = ((void *)__pyx_v_aligned_p);
goto __pyx_L0;
/* "View.MemoryView":298
*
* @cname('__pyx_align_pointer')
* cdef void *align_pointer(void *memory, size_t alignment) nogil: # <<<<<<<<<<<<<<
* "Align pointer memory on a given boundary"
* cdef Py_intptr_t aligned_p = <Py_intptr_t> memory
*/
/* function exit code */
__pyx_L0:;
return __pyx_r;
}
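/*
 * align_pointer rounds an address up to the next multiple of `alignment`
 * (a no-op when already aligned), using the same offset-and-bump modulo
 * formulation as the generated body above. A minimal standalone
 * equivalent (demo_align_up is a hypothetical name, not part of the
 * module):
 */
static void *demo_align_up(void *memory, size_t alignment)
{
    Py_uintptr_t p = (Py_uintptr_t)memory;
    size_t offset = p % alignment;   /* distance past the last boundary */
    if (offset > 0)
        p += alignment - offset;     /* bump to the next boundary */
    return (void *)p;
}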
/* "View.MemoryView":345
* cdef __Pyx_TypeInfo *typeinfo
*
* def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): # <<<<<<<<<<<<<<
* self.obj = obj
* self.flags = flags
*/
/* Python wrapper */
static int __pyx_memoryview___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_memoryview___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
PyObject *__pyx_v_obj = 0;
int __pyx_v_flags;
int __pyx_v_dtype_is_object;
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__cinit__ (wrapper)", 0);
{
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_obj,&__pyx_n_s_flags,&__pyx_n_s_dtype_is_object,0};
PyObject* values[3] = {0,0,0};
if (unlikely(__pyx_kwds)) {
Py_ssize_t kw_args;
const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
switch (pos_args) {
case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
CYTHON_FALLTHROUGH;
case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
CYTHON_FALLTHROUGH;
case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
CYTHON_FALLTHROUGH;
case 0: break;
default: goto __pyx_L5_argtuple_error;
}
kw_args = PyDict_Size(__pyx_kwds);
switch (pos_args) {
case 0:
if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_obj)) != 0)) kw_args--;
else goto __pyx_L5_argtuple_error;
CYTHON_FALLTHROUGH;
case 1:
if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_flags)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("__cinit__", 0, 2, 3, 1); __PYX_ERR(1, 345, __pyx_L3_error)
}
CYTHON_FALLTHROUGH;
case 2:
if (kw_args > 0) {
PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_dtype_is_object);
if (value) { values[2] = value; kw_args--; }
}
}
if (unlikely(kw_args > 0)) {
if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__cinit__") < 0)) __PYX_ERR(1, 345, __pyx_L3_error)
}
} else {
switch (PyTuple_GET_SIZE(__pyx_args)) {
case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
CYTHON_FALLTHROUGH;
case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
break;
default: goto __pyx_L5_argtuple_error;
}
}
__pyx_v_obj = values[0];
__pyx_v_flags = __Pyx_PyInt_As_int(values[1]); if (unlikely((__pyx_v_flags == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 345, __pyx_L3_error)
if (values[2]) {
__pyx_v_dtype_is_object = __Pyx_PyObject_IsTrue(values[2]); if (unlikely((__pyx_v_dtype_is_object == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 345, __pyx_L3_error)
} else {
__pyx_v_dtype_is_object = ((int)0);
}
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
__Pyx_RaiseArgtupleInvalid("__cinit__", 0, 2, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 345, __pyx_L3_error)
__pyx_L3_error:;
__Pyx_AddTraceback("View.MemoryView.memoryview.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
return -1;
__pyx_L4_argument_unpacking_done:;
__pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview___cinit__(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v_obj, __pyx_v_flags, __pyx_v_dtype_is_object);
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview___cinit__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj, int __pyx_v_flags, int __pyx_v_dtype_is_object) {
int __pyx_r;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
int __pyx_t_3;
int __pyx_t_4;
__Pyx_RefNannySetupContext("__cinit__", 0);
/* "View.MemoryView":346
*
* def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False):
* self.obj = obj # <<<<<<<<<<<<<<
* self.flags = flags
* if type(self) is memoryview or obj is not None:
*/
__Pyx_INCREF(__pyx_v_obj);
__Pyx_GIVEREF(__pyx_v_obj);
__Pyx_GOTREF(__pyx_v_self->obj);
__Pyx_DECREF(__pyx_v_self->obj);
__pyx_v_self->obj = __pyx_v_obj;
/* "View.MemoryView":347
* def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False):
* self.obj = obj
* self.flags = flags # <<<<<<<<<<<<<<
* if type(self) is memoryview or obj is not None:
* __Pyx_GetBuffer(obj, &self.view, flags)
*/
__pyx_v_self->flags = __pyx_v_flags;
/* "View.MemoryView":348
* self.obj = obj
* self.flags = flags
* if type(self) is memoryview or obj is not None: # <<<<<<<<<<<<<<
* __Pyx_GetBuffer(obj, &self.view, flags)
* if <PyObject *> self.view.obj == NULL:
*/
__pyx_t_2 = (((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))) == ((PyObject *)__pyx_memoryview_type));
__pyx_t_3 = (__pyx_t_2 != 0);
if (!__pyx_t_3) {
} else {
__pyx_t_1 = __pyx_t_3;
goto __pyx_L4_bool_binop_done;
}
__pyx_t_3 = (__pyx_v_obj != Py_None);
__pyx_t_2 = (__pyx_t_3 != 0);
__pyx_t_1 = __pyx_t_2;
__pyx_L4_bool_binop_done:;
if (__pyx_t_1) {
/* "View.MemoryView":349
* self.flags = flags
* if type(self) is memoryview or obj is not None:
* __Pyx_GetBuffer(obj, &self.view, flags) # <<<<<<<<<<<<<<
* if <PyObject *> self.view.obj == NULL:
* (<__pyx_buffer *> &self.view).obj = Py_None
*/
__pyx_t_4 = __Pyx_GetBuffer(__pyx_v_obj, (&__pyx_v_self->view), __pyx_v_flags); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 349, __pyx_L1_error)
/* "View.MemoryView":350
* if type(self) is memoryview or obj is not None:
* __Pyx_GetBuffer(obj, &self.view, flags)
* if <PyObject *> self.view.obj == NULL: # <<<<<<<<<<<<<<
* (<__pyx_buffer *> &self.view).obj = Py_None
* Py_INCREF(Py_None)
*/
__pyx_t_1 = ((((PyObject *)__pyx_v_self->view.obj) == NULL) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":351
* __Pyx_GetBuffer(obj, &self.view, flags)
* if <PyObject *> self.view.obj == NULL:
* (<__pyx_buffer *> &self.view).obj = Py_None # <<<<<<<<<<<<<<
* Py_INCREF(Py_None)
*
*/
((Py_buffer *)(&__pyx_v_self->view))->obj = Py_None;
/* "View.MemoryView":352
* if <PyObject *> self.view.obj == NULL:
* (<__pyx_buffer *> &self.view).obj = Py_None
* Py_INCREF(Py_None) # <<<<<<<<<<<<<<
*
* global __pyx_memoryview_thread_locks_used
*/
Py_INCREF(Py_None);
/* "View.MemoryView":350
* if type(self) is memoryview or obj is not None:
* __Pyx_GetBuffer(obj, &self.view, flags)
* if <PyObject *> self.view.obj == NULL: # <<<<<<<<<<<<<<
* (<__pyx_buffer *> &self.view).obj = Py_None
* Py_INCREF(Py_None)
*/
}
/* "View.MemoryView":348
* self.obj = obj
* self.flags = flags
* if type(self) is memoryview or obj is not None: # <<<<<<<<<<<<<<
* __Pyx_GetBuffer(obj, &self.view, flags)
* if <PyObject *> self.view.obj == NULL:
*/
}
/* "View.MemoryView":355
*
* global __pyx_memoryview_thread_locks_used
* if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED: # <<<<<<<<<<<<<<
* self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]
* __pyx_memoryview_thread_locks_used += 1
*/
__pyx_t_1 = ((__pyx_memoryview_thread_locks_used < 8) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":356
* global __pyx_memoryview_thread_locks_used
* if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED:
* self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] # <<<<<<<<<<<<<<
* __pyx_memoryview_thread_locks_used += 1
* if self.lock is NULL:
*/
__pyx_v_self->lock = (__pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]);
/* "View.MemoryView":357
* if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED:
* self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]
* __pyx_memoryview_thread_locks_used += 1 # <<<<<<<<<<<<<<
* if self.lock is NULL:
* self.lock = PyThread_allocate_lock()
*/
__pyx_memoryview_thread_locks_used = (__pyx_memoryview_thread_locks_used + 1);
/* "View.MemoryView":355
*
* global __pyx_memoryview_thread_locks_used
* if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED: # <<<<<<<<<<<<<<
* self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]
* __pyx_memoryview_thread_locks_used += 1
*/
}
/* "View.MemoryView":358
* self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]
* __pyx_memoryview_thread_locks_used += 1
* if self.lock is NULL: # <<<<<<<<<<<<<<
* self.lock = PyThread_allocate_lock()
* if self.lock is NULL:
*/
__pyx_t_1 = ((__pyx_v_self->lock == NULL) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":359
* __pyx_memoryview_thread_locks_used += 1
* if self.lock is NULL:
* self.lock = PyThread_allocate_lock() # <<<<<<<<<<<<<<
* if self.lock is NULL:
* raise MemoryError
*/
__pyx_v_self->lock = PyThread_allocate_lock();
/* "View.MemoryView":360
* if self.lock is NULL:
* self.lock = PyThread_allocate_lock()
* if self.lock is NULL: # <<<<<<<<<<<<<<
* raise MemoryError
*
*/
__pyx_t_1 = ((__pyx_v_self->lock == NULL) != 0);
if (unlikely(__pyx_t_1)) {
/* "View.MemoryView":361
* self.lock = PyThread_allocate_lock()
* if self.lock is NULL:
* raise MemoryError # <<<<<<<<<<<<<<
*
* if flags & PyBUF_FORMAT:
*/
PyErr_NoMemory(); __PYX_ERR(1, 361, __pyx_L1_error)
/* "View.MemoryView":360
* if self.lock is NULL:
* self.lock = PyThread_allocate_lock()
* if self.lock is NULL: # <<<<<<<<<<<<<<
* raise MemoryError
*
*/
}
/* "View.MemoryView":358
* self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]
* __pyx_memoryview_thread_locks_used += 1
* if self.lock is NULL: # <<<<<<<<<<<<<<
* self.lock = PyThread_allocate_lock()
* if self.lock is NULL:
*/
}
/* "View.MemoryView":363
* raise MemoryError
*
* if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<<
* self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0')
* else:
*/
__pyx_t_1 = ((__pyx_v_flags & PyBUF_FORMAT) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":364
*
* if flags & PyBUF_FORMAT:
* self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0') # <<<<<<<<<<<<<<
* else:
* self.dtype_is_object = dtype_is_object
*/
__pyx_t_2 = (((__pyx_v_self->view.format[0]) == 'O') != 0);
if (__pyx_t_2) {
} else {
__pyx_t_1 = __pyx_t_2;
goto __pyx_L11_bool_binop_done;
}
__pyx_t_2 = (((__pyx_v_self->view.format[1]) == '\x00') != 0);
__pyx_t_1 = __pyx_t_2;
__pyx_L11_bool_binop_done:;
__pyx_v_self->dtype_is_object = __pyx_t_1;
/* "View.MemoryView":363
* raise MemoryError
*
* if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<<
* self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0')
* else:
*/
goto __pyx_L10;
}
/* "View.MemoryView":366
* self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0')
* else:
* self.dtype_is_object = dtype_is_object # <<<<<<<<<<<<<<
*
* self.acquisition_count_aligned_p = <__pyx_atomic_int *> align_pointer(
*/
/*else*/ {
__pyx_v_self->dtype_is_object = __pyx_v_dtype_is_object;
}
__pyx_L10:;
/* "View.MemoryView":368
* self.dtype_is_object = dtype_is_object
*
* self.acquisition_count_aligned_p = <__pyx_atomic_int *> align_pointer( # <<<<<<<<<<<<<<
* <void *> &self.acquisition_count[0], sizeof(__pyx_atomic_int))
* self.typeinfo = NULL
*/
__pyx_v_self->acquisition_count_aligned_p = ((__pyx_atomic_int *)__pyx_align_pointer(((void *)(&(__pyx_v_self->acquisition_count[0]))), (sizeof(__pyx_atomic_int))));
/* "View.MemoryView":370
* self.acquisition_count_aligned_p = <__pyx_atomic_int *> align_pointer(
* <void *> &self.acquisition_count[0], sizeof(__pyx_atomic_int))
* self.typeinfo = NULL # <<<<<<<<<<<<<<
*
* def __dealloc__(memoryview self):
*/
__pyx_v_self->typeinfo = NULL;
/* "View.MemoryView":345
* cdef __Pyx_TypeInfo *typeinfo
*
* def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): # <<<<<<<<<<<<<<
* self.obj = obj
* self.flags = flags
*/
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_AddTraceback("View.MemoryView.memoryview.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
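/* --- Editor's sketch (illustrative, not referenced by the module) ---
 * __cinit__ above takes self.lock from a pool of preallocated thread locks
 * (THREAD_LOCKS_PREALLOCATED entries) and calls PyThread_allocate_lock()
 * only when the slot it drew is empty, saving an allocation per memoryview
 * in the common case.  A minimal sketch of the same pool-or-allocate
 * pattern in plain C; the names and pool size below are hypothetical: */
#define SKETCH_POOL_SIZE 8
static void *sketch_pool[SKETCH_POOL_SIZE];
static int sketch_pool_used = 0;
static void *sketch_take_from_pool(void *(*acquire_resource)(void))
{
    void *r = NULL;
    if (sketch_pool_used < SKETCH_POOL_SIZE)
        r = sketch_pool[sketch_pool_used++];  /* O(1) reuse, no allocation */
    if (r == NULL)
        r = acquire_resource();               /* empty slot: allocate fresh */
    return r;                                 /* NULL here means MemoryError */
}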
/* "View.MemoryView":372
* self.typeinfo = NULL
*
* def __dealloc__(memoryview self): # <<<<<<<<<<<<<<
* if self.obj is not None:
* __Pyx_ReleaseBuffer(&self.view)
*/
/* Python wrapper */
static void __pyx_memoryview___dealloc__(PyObject *__pyx_v_self); /*proto*/
static void __pyx_memoryview___dealloc__(PyObject *__pyx_v_self) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0);
__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_2__dealloc__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
}
static void __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_2__dealloc__(struct __pyx_memoryview_obj *__pyx_v_self) {
int __pyx_v_i;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
int __pyx_t_3;
int __pyx_t_4;
int __pyx_t_5;
PyThread_type_lock __pyx_t_6;
PyThread_type_lock __pyx_t_7;
__Pyx_RefNannySetupContext("__dealloc__", 0);
/* "View.MemoryView":373
*
* def __dealloc__(memoryview self):
* if self.obj is not None: # <<<<<<<<<<<<<<
* __Pyx_ReleaseBuffer(&self.view)
*
*/
__pyx_t_1 = (__pyx_v_self->obj != Py_None);
__pyx_t_2 = (__pyx_t_1 != 0);
if (__pyx_t_2) {
/* "View.MemoryView":374
* def __dealloc__(memoryview self):
* if self.obj is not None:
* __Pyx_ReleaseBuffer(&self.view) # <<<<<<<<<<<<<<
*
* cdef int i
*/
__Pyx_ReleaseBuffer((&__pyx_v_self->view));
/* "View.MemoryView":373
*
* def __dealloc__(memoryview self):
* if self.obj is not None: # <<<<<<<<<<<<<<
* __Pyx_ReleaseBuffer(&self.view)
*
*/
}
/* "View.MemoryView":378
* cdef int i
* global __pyx_memoryview_thread_locks_used
* if self.lock != NULL: # <<<<<<<<<<<<<<
* for i in range(__pyx_memoryview_thread_locks_used):
* if __pyx_memoryview_thread_locks[i] is self.lock:
*/
__pyx_t_2 = ((__pyx_v_self->lock != NULL) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":379
* global __pyx_memoryview_thread_locks_used
* if self.lock != NULL:
* for i in range(__pyx_memoryview_thread_locks_used): # <<<<<<<<<<<<<<
* if __pyx_memoryview_thread_locks[i] is self.lock:
* __pyx_memoryview_thread_locks_used -= 1
*/
__pyx_t_3 = __pyx_memoryview_thread_locks_used;
__pyx_t_4 = __pyx_t_3;
for (__pyx_t_5 = 0; __pyx_t_5 < __pyx_t_4; __pyx_t_5+=1) {
__pyx_v_i = __pyx_t_5;
/* "View.MemoryView":380
* if self.lock != NULL:
* for i in range(__pyx_memoryview_thread_locks_used):
* if __pyx_memoryview_thread_locks[i] is self.lock: # <<<<<<<<<<<<<<
* __pyx_memoryview_thread_locks_used -= 1
* if i != __pyx_memoryview_thread_locks_used:
*/
__pyx_t_2 = (((__pyx_memoryview_thread_locks[__pyx_v_i]) == __pyx_v_self->lock) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":381
* for i in range(__pyx_memoryview_thread_locks_used):
* if __pyx_memoryview_thread_locks[i] is self.lock:
* __pyx_memoryview_thread_locks_used -= 1 # <<<<<<<<<<<<<<
* if i != __pyx_memoryview_thread_locks_used:
* __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = (
*/
__pyx_memoryview_thread_locks_used = (__pyx_memoryview_thread_locks_used - 1);
/* "View.MemoryView":382
* if __pyx_memoryview_thread_locks[i] is self.lock:
* __pyx_memoryview_thread_locks_used -= 1
* if i != __pyx_memoryview_thread_locks_used: # <<<<<<<<<<<<<<
* __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = (
* __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i])
*/
__pyx_t_2 = ((__pyx_v_i != __pyx_memoryview_thread_locks_used) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":384
* if i != __pyx_memoryview_thread_locks_used:
* __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = (
* __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) # <<<<<<<<<<<<<<
* break
* else:
*/
__pyx_t_6 = (__pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]);
__pyx_t_7 = (__pyx_memoryview_thread_locks[__pyx_v_i]);
/* "View.MemoryView":383
* __pyx_memoryview_thread_locks_used -= 1
* if i != __pyx_memoryview_thread_locks_used:
* __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( # <<<<<<<<<<<<<<
* __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i])
* break
*/
(__pyx_memoryview_thread_locks[__pyx_v_i]) = __pyx_t_6;
(__pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]) = __pyx_t_7;
/* "View.MemoryView":382
* if __pyx_memoryview_thread_locks[i] is self.lock:
* __pyx_memoryview_thread_locks_used -= 1
* if i != __pyx_memoryview_thread_locks_used: # <<<<<<<<<<<<<<
* __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = (
* __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i])
*/
}
/* "View.MemoryView":385
* __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = (
* __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i])
* break # <<<<<<<<<<<<<<
* else:
* PyThread_free_lock(self.lock)
*/
goto __pyx_L6_break;
/* "View.MemoryView":380
* if self.lock != NULL:
* for i in range(__pyx_memoryview_thread_locks_used):
* if __pyx_memoryview_thread_locks[i] is self.lock: # <<<<<<<<<<<<<<
* __pyx_memoryview_thread_locks_used -= 1
* if i != __pyx_memoryview_thread_locks_used:
*/
}
}
/*else*/ {
/* "View.MemoryView":387
* break
* else:
* PyThread_free_lock(self.lock) # <<<<<<<<<<<<<<
*
* cdef char *get_item_pointer(memoryview self, object index) except NULL:
*/
PyThread_free_lock(__pyx_v_self->lock);
}
__pyx_L6_break:;
/* "View.MemoryView":378
* cdef int i
* global __pyx_memoryview_thread_locks_used
* if self.lock != NULL: # <<<<<<<<<<<<<<
* for i in range(__pyx_memoryview_thread_locks_used):
* if __pyx_memoryview_thread_locks[i] is self.lock:
*/
}
/* "View.MemoryView":372
* self.typeinfo = NULL
*
* def __dealloc__(memoryview self): # <<<<<<<<<<<<<<
* if self.obj is not None:
* __Pyx_ReleaseBuffer(&self.view)
*/
/* function exit code */
__Pyx_RefNannyFinishContext();
}
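/* --- Editor's sketch (illustrative, not referenced by the module) ---
 * __dealloc__ above returns self.lock to the pool by decrementing the used
 * count and swapping the freed slot with the last live one -- O(1) removal
 * from an unordered array -- and frees the lock outright only when it was
 * never pooled.  The same swap-with-last removal on a generic array
 * (hypothetical names): */
static int sketch_unordered_remove(void **items, int *used, void *victim)
{
    int i;
    for (i = 0; i < *used; i++) {
        if (items[i] == victim) {
            (*used)--;
            if (i != *used) {        /* swap victim slot with the last one */
                void *tmp = items[*used];
                items[*used] = items[i];
                items[i] = tmp;
            }
            return 1;                /* kept in the pool for reuse */
        }
    }
    return 0;                        /* not pooled; caller frees it */
}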
/* "View.MemoryView":389
* PyThread_free_lock(self.lock)
*
* cdef char *get_item_pointer(memoryview self, object index) except NULL: # <<<<<<<<<<<<<<
* cdef Py_ssize_t dim
* cdef char *itemp = <char *> self.view.buf
*/
static char *__pyx_memoryview_get_item_pointer(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index) {
Py_ssize_t __pyx_v_dim;
char *__pyx_v_itemp;
PyObject *__pyx_v_idx = NULL;
char *__pyx_r;
__Pyx_RefNannyDeclarations
Py_ssize_t __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
Py_ssize_t __pyx_t_3;
PyObject *(*__pyx_t_4)(PyObject *);
PyObject *__pyx_t_5 = NULL;
Py_ssize_t __pyx_t_6;
char *__pyx_t_7;
__Pyx_RefNannySetupContext("get_item_pointer", 0);
/* "View.MemoryView":391
* cdef char *get_item_pointer(memoryview self, object index) except NULL:
* cdef Py_ssize_t dim
* cdef char *itemp = <char *> self.view.buf # <<<<<<<<<<<<<<
*
* for dim, idx in enumerate(index):
*/
__pyx_v_itemp = ((char *)__pyx_v_self->view.buf);
/* "View.MemoryView":393
* cdef char *itemp = <char *> self.view.buf
*
* for dim, idx in enumerate(index): # <<<<<<<<<<<<<<
* itemp = pybuffer_index(&self.view, itemp, idx, dim)
*
*/
__pyx_t_1 = 0;
if (likely(PyList_CheckExact(__pyx_v_index)) || PyTuple_CheckExact(__pyx_v_index)) {
__pyx_t_2 = __pyx_v_index; __Pyx_INCREF(__pyx_t_2); __pyx_t_3 = 0;
__pyx_t_4 = NULL;
} else {
__pyx_t_3 = -1; __pyx_t_2 = PyObject_GetIter(__pyx_v_index); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 393, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_4 = Py_TYPE(__pyx_t_2)->tp_iternext; if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 393, __pyx_L1_error)
}
for (;;) {
if (likely(!__pyx_t_4)) {
if (likely(PyList_CheckExact(__pyx_t_2))) {
if (__pyx_t_3 >= PyList_GET_SIZE(__pyx_t_2)) break;
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
__pyx_t_5 = PyList_GET_ITEM(__pyx_t_2, __pyx_t_3); __Pyx_INCREF(__pyx_t_5); __pyx_t_3++; if (unlikely(0 < 0)) __PYX_ERR(1, 393, __pyx_L1_error)
#else
__pyx_t_5 = PySequence_ITEM(__pyx_t_2, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 393, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
#endif
} else {
if (__pyx_t_3 >= PyTuple_GET_SIZE(__pyx_t_2)) break;
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
__pyx_t_5 = PyTuple_GET_ITEM(__pyx_t_2, __pyx_t_3); __Pyx_INCREF(__pyx_t_5); __pyx_t_3++; if (unlikely(0 < 0)) __PYX_ERR(1, 393, __pyx_L1_error)
#else
__pyx_t_5 = PySequence_ITEM(__pyx_t_2, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 393, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
#endif
}
} else {
__pyx_t_5 = __pyx_t_4(__pyx_t_2);
if (unlikely(!__pyx_t_5)) {
PyObject* exc_type = PyErr_Occurred();
if (exc_type) {
if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear();
else __PYX_ERR(1, 393, __pyx_L1_error)
}
break;
}
__Pyx_GOTREF(__pyx_t_5);
}
__Pyx_XDECREF_SET(__pyx_v_idx, __pyx_t_5);
__pyx_t_5 = 0;
__pyx_v_dim = __pyx_t_1;
__pyx_t_1 = (__pyx_t_1 + 1);
/* "View.MemoryView":394
*
* for dim, idx in enumerate(index):
* itemp = pybuffer_index(&self.view, itemp, idx, dim) # <<<<<<<<<<<<<<
*
* return itemp
*/
__pyx_t_6 = __Pyx_PyIndex_AsSsize_t(__pyx_v_idx); if (unlikely((__pyx_t_6 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 394, __pyx_L1_error)
__pyx_t_7 = __pyx_pybuffer_index((&__pyx_v_self->view), __pyx_v_itemp, __pyx_t_6, __pyx_v_dim); if (unlikely(__pyx_t_7 == ((char *)NULL))) __PYX_ERR(1, 394, __pyx_L1_error)
__pyx_v_itemp = __pyx_t_7;
/* "View.MemoryView":393
* cdef char *itemp = <char *> self.view.buf
*
* for dim, idx in enumerate(index): # <<<<<<<<<<<<<<
* itemp = pybuffer_index(&self.view, itemp, idx, dim)
*
*/
}
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
/* "View.MemoryView":396
* itemp = pybuffer_index(&self.view, itemp, idx, dim)
*
* return itemp # <<<<<<<<<<<<<<
*
*
*/
__pyx_r = __pyx_v_itemp;
goto __pyx_L0;
/* "View.MemoryView":389
* PyThread_free_lock(self.lock)
*
* cdef char *get_item_pointer(memoryview self, object index) except NULL: # <<<<<<<<<<<<<<
* cdef Py_ssize_t dim
* cdef char *itemp = <char *> self.view.buf
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_AddTraceback("View.MemoryView.memoryview.get_item_pointer", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_idx);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
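/* --- Editor's sketch (illustrative, not referenced by the module) ---
 * get_item_pointer above walks the index tuple and advances a byte pointer
 * one dimension at a time via pybuffer_index.  Ignoring suboffsets, bounds
 * checks and error handling, that reduces to stride arithmetic with
 * wrap-around for negative indices (Py_ssize_t comes from the Python.h
 * preamble; the helper name is hypothetical): */
static char *sketch_item_pointer(char *buf,
                                 const Py_ssize_t *shape,
                                 const Py_ssize_t *strides,
                                 const Py_ssize_t *index, int ndim)
{
    int dim;
    for (dim = 0; dim < ndim; dim++) {
        Py_ssize_t idx = index[dim];
        if (idx < 0)
            idx += shape[dim];      /* negative index counts from the end */
        buf += idx * strides[dim];  /* strides are in bytes */
    }
    return buf;                     /* address of the selected item */
}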
/* "View.MemoryView":399
*
*
* def __getitem__(memoryview self, object index): # <<<<<<<<<<<<<<
* if index is Ellipsis:
* return self
*/
/* Python wrapper */
static PyObject *__pyx_memoryview___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index); /*proto*/
static PyObject *__pyx_memoryview___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__getitem__ (wrapper)", 0);
__pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_4__getitem__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((PyObject *)__pyx_v_index));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_4__getitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index) {
PyObject *__pyx_v_have_slices = NULL;
PyObject *__pyx_v_indices = NULL;
char *__pyx_v_itemp;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
char *__pyx_t_6;
__Pyx_RefNannySetupContext("__getitem__", 0);
/* "View.MemoryView":400
*
* def __getitem__(memoryview self, object index):
* if index is Ellipsis: # <<<<<<<<<<<<<<
* return self
*
*/
__pyx_t_1 = (__pyx_v_index == __pyx_builtin_Ellipsis);
__pyx_t_2 = (__pyx_t_1 != 0);
if (__pyx_t_2) {
/* "View.MemoryView":401
* def __getitem__(memoryview self, object index):
* if index is Ellipsis:
* return self # <<<<<<<<<<<<<<
*
* have_slices, indices = _unellipsify(index, self.view.ndim)
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(((PyObject *)__pyx_v_self));
__pyx_r = ((PyObject *)__pyx_v_self);
goto __pyx_L0;
/* "View.MemoryView":400
*
* def __getitem__(memoryview self, object index):
* if index is Ellipsis: # <<<<<<<<<<<<<<
* return self
*
*/
}
/* "View.MemoryView":403
* return self
*
* have_slices, indices = _unellipsify(index, self.view.ndim) # <<<<<<<<<<<<<<
*
* cdef char *itemp
*/
__pyx_t_3 = _unellipsify(__pyx_v_index, __pyx_v_self->view.ndim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 403, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
if (likely(__pyx_t_3 != Py_None)) {
PyObject* sequence = __pyx_t_3;
Py_ssize_t size = __Pyx_PySequence_SIZE(sequence);
if (unlikely(size != 2)) {
if (size > 2) __Pyx_RaiseTooManyValuesError(2);
else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size);
__PYX_ERR(1, 403, __pyx_L1_error)
}
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
__pyx_t_4 = PyTuple_GET_ITEM(sequence, 0);
__pyx_t_5 = PyTuple_GET_ITEM(sequence, 1);
__Pyx_INCREF(__pyx_t_4);
__Pyx_INCREF(__pyx_t_5);
#else
__pyx_t_4 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 403, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_5 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 403, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
#endif
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
} else {
__Pyx_RaiseNoneNotIterableError(); __PYX_ERR(1, 403, __pyx_L1_error)
}
__pyx_v_have_slices = __pyx_t_4;
__pyx_t_4 = 0;
__pyx_v_indices = __pyx_t_5;
__pyx_t_5 = 0;
/* "View.MemoryView":406
*
* cdef char *itemp
* if have_slices: # <<<<<<<<<<<<<<
* return memview_slice(self, indices)
* else:
*/
__pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_v_have_slices); if (unlikely(__pyx_t_2 < 0)) __PYX_ERR(1, 406, __pyx_L1_error)
if (__pyx_t_2) {
/* "View.MemoryView":407
* cdef char *itemp
* if have_slices:
* return memview_slice(self, indices) # <<<<<<<<<<<<<<
* else:
* itemp = self.get_item_pointer(indices)
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_3 = ((PyObject *)__pyx_memview_slice(__pyx_v_self, __pyx_v_indices)); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 407, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_r = __pyx_t_3;
__pyx_t_3 = 0;
goto __pyx_L0;
/* "View.MemoryView":406
*
* cdef char *itemp
* if have_slices: # <<<<<<<<<<<<<<
* return memview_slice(self, indices)
* else:
*/
}
/* "View.MemoryView":409
* return memview_slice(self, indices)
* else:
* itemp = self.get_item_pointer(indices) # <<<<<<<<<<<<<<
* return self.convert_item_to_object(itemp)
*
*/
/*else*/ {
__pyx_t_6 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->get_item_pointer(__pyx_v_self, __pyx_v_indices); if (unlikely(__pyx_t_6 == ((char *)NULL))) __PYX_ERR(1, 409, __pyx_L1_error)
__pyx_v_itemp = __pyx_t_6;
/* "View.MemoryView":410
* else:
* itemp = self.get_item_pointer(indices)
* return self.convert_item_to_object(itemp) # <<<<<<<<<<<<<<
*
* def __setitem__(memoryview self, object index, object value):
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_3 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->convert_item_to_object(__pyx_v_self, __pyx_v_itemp); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 410, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_r = __pyx_t_3;
__pyx_t_3 = 0;
goto __pyx_L0;
}
/* "View.MemoryView":399
*
*
* def __getitem__(memoryview self, object index): # <<<<<<<<<<<<<<
* if index is Ellipsis:
* return self
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_AddTraceback("View.MemoryView.memoryview.__getitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_have_slices);
__Pyx_XDECREF(__pyx_v_indices);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
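/* --- Editor's sketch (illustrative, not referenced by the module) ---
 * Two points in __getitem__ above are easy to miss: m[...] returns self
 * itself (no new view is built), and _unellipsify pads the index out to
 * self.view.ndim entries, expanding a lone Ellipsis into the missing number
 * of full slices.  E.g. on a 3-d view, m[..., 0] normalizes to
 * (slice(None), slice(None), 0) with have_slices true (slice path), while
 * m[1, 2, 3] keeps have_slices false and converts a single item.  The
 * padding count in isolation (hypothetical helper): */
static int sketch_ellipsis_expansion(int ndim, int n_explicit_indices)
{
    /* m[..., 0] on ndim == 3: one explicit index -> 2 inserted full slices */
    return ndim - n_explicit_indices;
}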
/* "View.MemoryView":412
* return self.convert_item_to_object(itemp)
*
* def __setitem__(memoryview self, object index, object value): # <<<<<<<<<<<<<<
* if self.view.readonly:
* raise TypeError("Cannot assign to read-only memoryview")
*/
/* Python wrapper */
static int __pyx_memoryview___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value); /*proto*/
static int __pyx_memoryview___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value) {
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0);
__pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_6__setitem__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((PyObject *)__pyx_v_index), ((PyObject *)__pyx_v_value));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_6__setitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value) {
PyObject *__pyx_v_have_slices = NULL;
PyObject *__pyx_v_obj = NULL;
int __pyx_r;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
__Pyx_RefNannySetupContext("__setitem__", 0);
__Pyx_INCREF(__pyx_v_index);
/* "View.MemoryView":413
*
* def __setitem__(memoryview self, object index, object value):
* if self.view.readonly: # <<<<<<<<<<<<<<
* raise TypeError("Cannot assign to read-only memoryview")
*
*/
__pyx_t_1 = (__pyx_v_self->view.readonly != 0);
if (unlikely(__pyx_t_1)) {
/* "View.MemoryView":414
* def __setitem__(memoryview self, object index, object value):
* if self.view.readonly:
* raise TypeError("Cannot assign to read-only memoryview") # <<<<<<<<<<<<<<
*
* have_slices, index = _unellipsify(index, self.view.ndim)
*/
__pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__8, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 414, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_Raise(__pyx_t_2, 0, 0, 0);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__PYX_ERR(1, 414, __pyx_L1_error)
/* "View.MemoryView":413
*
* def __setitem__(memoryview self, object index, object value):
* if self.view.readonly: # <<<<<<<<<<<<<<
* raise TypeError("Cannot assign to read-only memoryview")
*
*/
}
/* "View.MemoryView":416
* raise TypeError("Cannot assign to read-only memoryview")
*
* have_slices, index = _unellipsify(index, self.view.ndim) # <<<<<<<<<<<<<<
*
* if have_slices:
*/
__pyx_t_2 = _unellipsify(__pyx_v_index, __pyx_v_self->view.ndim); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 416, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
if (likely(__pyx_t_2 != Py_None)) {
PyObject* sequence = __pyx_t_2;
Py_ssize_t size = __Pyx_PySequence_SIZE(sequence);
if (unlikely(size != 2)) {
if (size > 2) __Pyx_RaiseTooManyValuesError(2);
else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size);
__PYX_ERR(1, 416, __pyx_L1_error)
}
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
__pyx_t_3 = PyTuple_GET_ITEM(sequence, 0);
__pyx_t_4 = PyTuple_GET_ITEM(sequence, 1);
__Pyx_INCREF(__pyx_t_3);
__Pyx_INCREF(__pyx_t_4);
#else
__pyx_t_3 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 416, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 416, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
#endif
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
} else {
__Pyx_RaiseNoneNotIterableError(); __PYX_ERR(1, 416, __pyx_L1_error)
}
__pyx_v_have_slices = __pyx_t_3;
__pyx_t_3 = 0;
__Pyx_DECREF_SET(__pyx_v_index, __pyx_t_4);
__pyx_t_4 = 0;
/* "View.MemoryView":418
* have_slices, index = _unellipsify(index, self.view.ndim)
*
* if have_slices: # <<<<<<<<<<<<<<
* obj = self.is_slice(value)
* if obj:
*/
__pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_v_have_slices); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 418, __pyx_L1_error)
if (__pyx_t_1) {
/* "View.MemoryView":419
*
* if have_slices:
* obj = self.is_slice(value) # <<<<<<<<<<<<<<
* if obj:
* self.setitem_slice_assignment(self[index], obj)
*/
__pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->is_slice(__pyx_v_self, __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 419, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_v_obj = __pyx_t_2;
__pyx_t_2 = 0;
/* "View.MemoryView":420
* if have_slices:
* obj = self.is_slice(value)
* if obj: # <<<<<<<<<<<<<<
* self.setitem_slice_assignment(self[index], obj)
* else:
*/
__pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_v_obj); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 420, __pyx_L1_error)
if (__pyx_t_1) {
/* "View.MemoryView":421
* obj = self.is_slice(value)
* if obj:
* self.setitem_slice_assignment(self[index], obj) # <<<<<<<<<<<<<<
* else:
* self.setitem_slice_assign_scalar(self[index], value)
*/
__pyx_t_2 = __Pyx_PyObject_GetItem(((PyObject *)__pyx_v_self), __pyx_v_index); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 421, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_4 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->setitem_slice_assignment(__pyx_v_self, __pyx_t_2, __pyx_v_obj); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 421, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
/* "View.MemoryView":420
* if have_slices:
* obj = self.is_slice(value)
* if obj: # <<<<<<<<<<<<<<
* self.setitem_slice_assignment(self[index], obj)
* else:
*/
goto __pyx_L5;
}
/* "View.MemoryView":423
* self.setitem_slice_assignment(self[index], obj)
* else:
* self.setitem_slice_assign_scalar(self[index], value) # <<<<<<<<<<<<<<
* else:
* self.setitem_indexed(index, value)
*/
/*else*/ {
__pyx_t_4 = __Pyx_PyObject_GetItem(((PyObject *)__pyx_v_self), __pyx_v_index); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 423, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
if (!(likely(((__pyx_t_4) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_4, __pyx_memoryview_type))))) __PYX_ERR(1, 423, __pyx_L1_error)
__pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->setitem_slice_assign_scalar(__pyx_v_self, ((struct __pyx_memoryview_obj *)__pyx_t_4), __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 423, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
}
__pyx_L5:;
/* "View.MemoryView":418
* have_slices, index = _unellipsify(index, self.view.ndim)
*
* if have_slices: # <<<<<<<<<<<<<<
* obj = self.is_slice(value)
* if obj:
*/
goto __pyx_L4;
}
/* "View.MemoryView":425
* self.setitem_slice_assign_scalar(self[index], value)
* else:
* self.setitem_indexed(index, value) # <<<<<<<<<<<<<<
*
* cdef is_slice(self, obj):
*/
/*else*/ {
__pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->setitem_indexed(__pyx_v_self, __pyx_v_index, __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 425, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
}
__pyx_L4:;
/* "View.MemoryView":412
* return self.convert_item_to_object(itemp)
*
* def __setitem__(memoryview self, object index, object value): # <<<<<<<<<<<<<<
* if self.view.readonly:
* raise TypeError("Cannot assign to read-only memoryview")
*/
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_AddTraceback("View.MemoryView.memoryview.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_have_slices);
__Pyx_XDECREF(__pyx_v_obj);
__Pyx_XDECREF(__pyx_v_index);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
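/* --- Editor's sketch (illustrative, not referenced by the module) ---
 * __setitem__ above first rejects read-only views, then re-slices self with
 * the same index and routes the assignment one of three ways.  The branch
 * structure, reduced to plain C with hypothetical inputs and result codes: */
static int sketch_setitem_route(int view_readonly, int have_slices,
                                int value_is_slice)
{
    if (view_readonly)  return -1;  /* raise TypeError */
    if (!have_slices)   return 0;   /* setitem_indexed: store one item */
    if (value_is_slice) return 1;   /* setitem_slice_assignment: slice copy */
    return 2;                       /* setitem_slice_assign_scalar: fill */
}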
/* "View.MemoryView":427
* self.setitem_indexed(index, value)
*
* cdef is_slice(self, obj): # <<<<<<<<<<<<<<
* if not isinstance(obj, memoryview):
* try:
*/
static PyObject *__pyx_memoryview_is_slice(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
PyObject *__pyx_t_6 = NULL;
PyObject *__pyx_t_7 = NULL;
PyObject *__pyx_t_8 = NULL;
int __pyx_t_9;
__Pyx_RefNannySetupContext("is_slice", 0);
__Pyx_INCREF(__pyx_v_obj);
/* "View.MemoryView":428
*
* cdef is_slice(self, obj):
* if not isinstance(obj, memoryview): # <<<<<<<<<<<<<<
* try:
* obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS,
*/
__pyx_t_1 = __Pyx_TypeCheck(__pyx_v_obj, __pyx_memoryview_type);
__pyx_t_2 = ((!(__pyx_t_1 != 0)) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":429
* cdef is_slice(self, obj):
* if not isinstance(obj, memoryview):
* try: # <<<<<<<<<<<<<<
* obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS,
* self.dtype_is_object)
*/
{
__Pyx_PyThreadState_declare
__Pyx_PyThreadState_assign
__Pyx_ExceptionSave(&__pyx_t_3, &__pyx_t_4, &__pyx_t_5);
__Pyx_XGOTREF(__pyx_t_3);
__Pyx_XGOTREF(__pyx_t_4);
__Pyx_XGOTREF(__pyx_t_5);
/*try:*/ {
/* "View.MemoryView":430
* if not isinstance(obj, memoryview):
* try:
* obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, # <<<<<<<<<<<<<<
* self.dtype_is_object)
* except TypeError:
*/
__pyx_t_6 = __Pyx_PyInt_From_int(((__pyx_v_self->flags & (~PyBUF_WRITABLE)) | PyBUF_ANY_CONTIGUOUS)); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 430, __pyx_L4_error)
__Pyx_GOTREF(__pyx_t_6);
/* "View.MemoryView":431
* try:
* obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS,
* self.dtype_is_object) # <<<<<<<<<<<<<<
* except TypeError:
* return None
*/
__pyx_t_7 = __Pyx_PyBool_FromLong(__pyx_v_self->dtype_is_object); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 431, __pyx_L4_error)
__Pyx_GOTREF(__pyx_t_7);
/* "View.MemoryView":430
* if not isinstance(obj, memoryview):
* try:
* obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, # <<<<<<<<<<<<<<
* self.dtype_is_object)
* except TypeError:
*/
__pyx_t_8 = PyTuple_New(3); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 430, __pyx_L4_error)
__Pyx_GOTREF(__pyx_t_8);
__Pyx_INCREF(__pyx_v_obj);
__Pyx_GIVEREF(__pyx_v_obj);
PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_v_obj);
__Pyx_GIVEREF(__pyx_t_6);
PyTuple_SET_ITEM(__pyx_t_8, 1, __pyx_t_6);
__Pyx_GIVEREF(__pyx_t_7);
PyTuple_SET_ITEM(__pyx_t_8, 2, __pyx_t_7);
__pyx_t_6 = 0;
__pyx_t_7 = 0;
__pyx_t_7 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryview_type), __pyx_t_8, NULL); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 430, __pyx_L4_error)
__Pyx_GOTREF(__pyx_t_7);
__Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
__Pyx_DECREF_SET(__pyx_v_obj, __pyx_t_7);
__pyx_t_7 = 0;
/* "View.MemoryView":429
* cdef is_slice(self, obj):
* if not isinstance(obj, memoryview):
* try: # <<<<<<<<<<<<<<
* obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS,
* self.dtype_is_object)
*/
}
__Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
goto __pyx_L9_try_end;
__pyx_L4_error:;
__Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0;
__Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0;
__Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0;
/* "View.MemoryView":432
* obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS,
* self.dtype_is_object)
* except TypeError: # <<<<<<<<<<<<<<
* return None
*
*/
__pyx_t_9 = __Pyx_PyErr_ExceptionMatches(__pyx_builtin_TypeError);
if (__pyx_t_9) {
__Pyx_AddTraceback("View.MemoryView.memoryview.is_slice", __pyx_clineno, __pyx_lineno, __pyx_filename);
if (__Pyx_GetException(&__pyx_t_7, &__pyx_t_8, &__pyx_t_6) < 0) __PYX_ERR(1, 432, __pyx_L6_except_error)
__Pyx_GOTREF(__pyx_t_7);
__Pyx_GOTREF(__pyx_t_8);
__Pyx_GOTREF(__pyx_t_6);
/* "View.MemoryView":433
* self.dtype_is_object)
* except TypeError:
* return None # <<<<<<<<<<<<<<
*
* return obj
*/
__Pyx_XDECREF(__pyx_r);
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
__Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
goto __pyx_L7_except_return;
}
goto __pyx_L6_except_error;
__pyx_L6_except_error:;
/* "View.MemoryView":429
* cdef is_slice(self, obj):
* if not isinstance(obj, memoryview):
* try: # <<<<<<<<<<<<<<
* obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS,
* self.dtype_is_object)
*/
__Pyx_XGIVEREF(__pyx_t_3);
__Pyx_XGIVEREF(__pyx_t_4);
__Pyx_XGIVEREF(__pyx_t_5);
__Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5);
goto __pyx_L1_error;
__pyx_L7_except_return:;
__Pyx_XGIVEREF(__pyx_t_3);
__Pyx_XGIVEREF(__pyx_t_4);
__Pyx_XGIVEREF(__pyx_t_5);
__Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5);
goto __pyx_L0;
__pyx_L9_try_end:;
}
/* "View.MemoryView":428
*
* cdef is_slice(self, obj):
* if not isinstance(obj, memoryview): # <<<<<<<<<<<<<<
* try:
* obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS,
*/
}
/* "View.MemoryView":435
* return None
*
* return obj # <<<<<<<<<<<<<<
*
* cdef setitem_slice_assignment(self, dst, src):
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(__pyx_v_obj);
__pyx_r = __pyx_v_obj;
goto __pyx_L0;
/* "View.MemoryView":427
* self.setitem_indexed(index, value)
*
* cdef is_slice(self, obj): # <<<<<<<<<<<<<<
* if not isinstance(obj, memoryview):
* try:
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_6);
__Pyx_XDECREF(__pyx_t_7);
__Pyx_XDECREF(__pyx_t_8);
__Pyx_AddTraceback("View.MemoryView.memoryview.is_slice", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_obj);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
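/* --- Editor's sketch (illustrative, not referenced by the module) ---
 * is_slice above coerces a non-memoryview value by re-exporting it with the
 * receiver's own flags, minus writability (the source is only read from)
 * and plus PyBUF_ANY_CONTIGUOUS; a TypeError from the coercion maps to a
 * None return, which the caller treats as "not slice-assignable".  The flag
 * arithmetic in isolation (PyBUF_* from the Python.h preamble): */
static int sketch_source_buffer_flags(int dst_flags)
{
    return (dst_flags & ~PyBUF_WRITABLE) | PyBUF_ANY_CONTIGUOUS;
}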
/* "View.MemoryView":437
* return obj
*
* cdef setitem_slice_assignment(self, dst, src): # <<<<<<<<<<<<<<
* cdef __Pyx_memviewslice dst_slice
* cdef __Pyx_memviewslice src_slice
*/
static PyObject *__pyx_memoryview_setitem_slice_assignment(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_dst, PyObject *__pyx_v_src) {
__Pyx_memviewslice __pyx_v_dst_slice;
__Pyx_memviewslice __pyx_v_src_slice;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_t_2;
int __pyx_t_3;
int __pyx_t_4;
__Pyx_RefNannySetupContext("setitem_slice_assignment", 0);
/* "View.MemoryView":441
* cdef __Pyx_memviewslice src_slice
*
* memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0], # <<<<<<<<<<<<<<
* get_slice_from_memview(dst, &dst_slice)[0],
* src.ndim, dst.ndim, self.dtype_is_object)
*/
if (!(likely(((__pyx_v_src) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_src, __pyx_memoryview_type))))) __PYX_ERR(1, 441, __pyx_L1_error)
/* "View.MemoryView":442
*
* memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0],
* get_slice_from_memview(dst, &dst_slice)[0], # <<<<<<<<<<<<<<
* src.ndim, dst.ndim, self.dtype_is_object)
*
*/
if (!(likely(((__pyx_v_dst) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_dst, __pyx_memoryview_type))))) __PYX_ERR(1, 442, __pyx_L1_error)
/* "View.MemoryView":443
* memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0],
* get_slice_from_memview(dst, &dst_slice)[0],
* src.ndim, dst.ndim, self.dtype_is_object) # <<<<<<<<<<<<<<
*
* cdef setitem_slice_assign_scalar(self, memoryview dst, value):
*/
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_src, __pyx_n_s_ndim); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 443, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = __Pyx_PyInt_As_int(__pyx_t_1); if (unlikely((__pyx_t_2 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 443, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_dst, __pyx_n_s_ndim); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 443, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_3 = __Pyx_PyInt_As_int(__pyx_t_1); if (unlikely((__pyx_t_3 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 443, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "View.MemoryView":441
* cdef __Pyx_memviewslice src_slice
*
* memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0], # <<<<<<<<<<<<<<
* get_slice_from_memview(dst, &dst_slice)[0],
* src.ndim, dst.ndim, self.dtype_is_object)
*/
__pyx_t_4 = __pyx_memoryview_copy_contents((__pyx_memoryview_get_slice_from_memoryview(((struct __pyx_memoryview_obj *)__pyx_v_src), (&__pyx_v_src_slice))[0]), (__pyx_memoryview_get_slice_from_memoryview(((struct __pyx_memoryview_obj *)__pyx_v_dst), (&__pyx_v_dst_slice))[0]), __pyx_t_2, __pyx_t_3, __pyx_v_self->dtype_is_object); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 441, __pyx_L1_error)
/* "View.MemoryView":437
* return obj
*
* cdef setitem_slice_assignment(self, dst, src): # <<<<<<<<<<<<<<
* cdef __Pyx_memviewslice dst_slice
* cdef __Pyx_memviewslice src_slice
*/
/* function exit code */
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.memoryview.setitem_slice_assignment", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
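/* --- Editor's sketch (illustrative, not referenced by the module) ---
 * memoryview_copy_contents, called above, copies item by item between two
 * possibly non-contiguous slices (plus broadcasting and object-refcount
 * handling not shown here).  The 1-d core of such a copy, with byte
 * strides, reduces to (hypothetical helper): */
static void sketch_strided_copy_1d(char *dst, Py_ssize_t dst_stride,
                                   const char *src, Py_ssize_t src_stride,
                                   Py_ssize_t n, Py_ssize_t itemsize)
{
    Py_ssize_t i, b;
    for (i = 0; i < n; i++) {
        for (b = 0; b < itemsize; b++)  /* copy one item, byte by byte */
            dst[b] = src[b];
        dst += dst_stride;              /* strides may be negative */
        src += src_stride;
    }
}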
/* "View.MemoryView":445
* src.ndim, dst.ndim, self.dtype_is_object)
*
* cdef setitem_slice_assign_scalar(self, memoryview dst, value): # <<<<<<<<<<<<<<
* cdef int array[128]
* cdef void *tmp = NULL
*/
static PyObject *__pyx_memoryview_setitem_slice_assign_scalar(struct __pyx_memoryview_obj *__pyx_v_self, struct __pyx_memoryview_obj *__pyx_v_dst, PyObject *__pyx_v_value) {
  int __pyx_v_array[128];
void *__pyx_v_tmp;
void *__pyx_v_item;
__Pyx_memviewslice *__pyx_v_dst_slice;
__Pyx_memviewslice __pyx_v_tmp_slice;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
int __pyx_t_3;
int __pyx_t_4;
char const *__pyx_t_5;
PyObject *__pyx_t_6 = NULL;
PyObject *__pyx_t_7 = NULL;
PyObject *__pyx_t_8 = NULL;
PyObject *__pyx_t_9 = NULL;
PyObject *__pyx_t_10 = NULL;
PyObject *__pyx_t_11 = NULL;
__Pyx_RefNannySetupContext("setitem_slice_assign_scalar", 0);
/* "View.MemoryView":447
* cdef setitem_slice_assign_scalar(self, memoryview dst, value):
* cdef int array[128]
* cdef void *tmp = NULL # <<<<<<<<<<<<<<
* cdef void *item
*
*/
__pyx_v_tmp = NULL;
/* "View.MemoryView":452
* cdef __Pyx_memviewslice *dst_slice
* cdef __Pyx_memviewslice tmp_slice
* dst_slice = get_slice_from_memview(dst, &tmp_slice) # <<<<<<<<<<<<<<
*
* if <size_t>self.view.itemsize > sizeof(array):
*/
__pyx_v_dst_slice = __pyx_memoryview_get_slice_from_memoryview(__pyx_v_dst, (&__pyx_v_tmp_slice));
/* "View.MemoryView":454
* dst_slice = get_slice_from_memview(dst, &tmp_slice)
*
* if <size_t>self.view.itemsize > sizeof(array): # <<<<<<<<<<<<<<
* tmp = PyMem_Malloc(self.view.itemsize)
* if tmp == NULL:
*/
__pyx_t_1 = ((((size_t)__pyx_v_self->view.itemsize) > (sizeof(__pyx_v_array))) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":455
*
* if <size_t>self.view.itemsize > sizeof(array):
* tmp = PyMem_Malloc(self.view.itemsize) # <<<<<<<<<<<<<<
* if tmp == NULL:
* raise MemoryError
*/
__pyx_v_tmp = PyMem_Malloc(__pyx_v_self->view.itemsize);
/* "View.MemoryView":456
* if <size_t>self.view.itemsize > sizeof(array):
* tmp = PyMem_Malloc(self.view.itemsize)
* if tmp == NULL: # <<<<<<<<<<<<<<
* raise MemoryError
* item = tmp
*/
__pyx_t_1 = ((__pyx_v_tmp == NULL) != 0);
if (unlikely(__pyx_t_1)) {
/* "View.MemoryView":457
* tmp = PyMem_Malloc(self.view.itemsize)
* if tmp == NULL:
* raise MemoryError # <<<<<<<<<<<<<<
* item = tmp
* else:
*/
PyErr_NoMemory(); __PYX_ERR(1, 457, __pyx_L1_error)
/* "View.MemoryView":456
* if <size_t>self.view.itemsize > sizeof(array):
* tmp = PyMem_Malloc(self.view.itemsize)
* if tmp == NULL: # <<<<<<<<<<<<<<
* raise MemoryError
* item = tmp
*/
}
/* "View.MemoryView":458
* if tmp == NULL:
* raise MemoryError
* item = tmp # <<<<<<<<<<<<<<
* else:
* item = <void *> array
*/
__pyx_v_item = __pyx_v_tmp;
/* "View.MemoryView":454
* dst_slice = get_slice_from_memview(dst, &tmp_slice)
*
* if <size_t>self.view.itemsize > sizeof(array): # <<<<<<<<<<<<<<
* tmp = PyMem_Malloc(self.view.itemsize)
* if tmp == NULL:
*/
goto __pyx_L3;
}
/* "View.MemoryView":460
* item = tmp
* else:
* item = <void *> array # <<<<<<<<<<<<<<
*
* try:
*/
/*else*/ {
__pyx_v_item = ((void *)__pyx_v_array);
}
__pyx_L3:;
/* "View.MemoryView":462
* item = <void *> array
*
* try: # <<<<<<<<<<<<<<
* if self.dtype_is_object:
* (<PyObject **> item)[0] = <PyObject *> value
*/
/*try:*/ {
/* "View.MemoryView":463
*
* try:
* if self.dtype_is_object: # <<<<<<<<<<<<<<
* (<PyObject **> item)[0] = <PyObject *> value
* else:
*/
__pyx_t_1 = (__pyx_v_self->dtype_is_object != 0);
if (__pyx_t_1) {
/* "View.MemoryView":464
* try:
* if self.dtype_is_object:
* (<PyObject **> item)[0] = <PyObject *> value # <<<<<<<<<<<<<<
* else:
* self.assign_item_from_object(<char *> item, value)
*/
(((PyObject **)__pyx_v_item)[0]) = ((PyObject *)__pyx_v_value);
/* "View.MemoryView":463
*
* try:
* if self.dtype_is_object: # <<<<<<<<<<<<<<
* (<PyObject **> item)[0] = <PyObject *> value
* else:
*/
goto __pyx_L8;
}
/* "View.MemoryView":466
* (<PyObject **> item)[0] = <PyObject *> value
* else:
* self.assign_item_from_object(<char *> item, value) # <<<<<<<<<<<<<<
*
*
*/
/*else*/ {
__pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->assign_item_from_object(__pyx_v_self, ((char *)__pyx_v_item), __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 466, __pyx_L6_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
}
__pyx_L8:;
/* "View.MemoryView":470
*
*
* if self.view.suboffsets != NULL: # <<<<<<<<<<<<<<
* assert_direct_dimensions(self.view.suboffsets, self.view.ndim)
* slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize,
*/
__pyx_t_1 = ((__pyx_v_self->view.suboffsets != NULL) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":471
*
* if self.view.suboffsets != NULL:
* assert_direct_dimensions(self.view.suboffsets, self.view.ndim) # <<<<<<<<<<<<<<
* slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize,
* item, self.dtype_is_object)
*/
__pyx_t_2 = assert_direct_dimensions(__pyx_v_self->view.suboffsets, __pyx_v_self->view.ndim); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 471, __pyx_L6_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
/* "View.MemoryView":470
*
*
* if self.view.suboffsets != NULL: # <<<<<<<<<<<<<<
* assert_direct_dimensions(self.view.suboffsets, self.view.ndim)
* slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize,
*/
}
/* "View.MemoryView":472
* if self.view.suboffsets != NULL:
* assert_direct_dimensions(self.view.suboffsets, self.view.ndim)
* slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize, # <<<<<<<<<<<<<<
* item, self.dtype_is_object)
* finally:
*/
__pyx_memoryview_slice_assign_scalar(__pyx_v_dst_slice, __pyx_v_dst->view.ndim, __pyx_v_self->view.itemsize, __pyx_v_item, __pyx_v_self->dtype_is_object);
}
/* "View.MemoryView":475
* item, self.dtype_is_object)
* finally:
* PyMem_Free(tmp) # <<<<<<<<<<<<<<
*
* cdef setitem_indexed(self, index, value):
*/
/*finally:*/ {
/*normal exit:*/{
PyMem_Free(__pyx_v_tmp);
goto __pyx_L7;
}
__pyx_L6_error:;
/*exception exit:*/{
__Pyx_PyThreadState_declare
__Pyx_PyThreadState_assign
__pyx_t_6 = 0; __pyx_t_7 = 0; __pyx_t_8 = 0; __pyx_t_9 = 0; __pyx_t_10 = 0; __pyx_t_11 = 0;
__Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
if (PY_MAJOR_VERSION >= 3) __Pyx_ExceptionSwap(&__pyx_t_9, &__pyx_t_10, &__pyx_t_11);
if ((PY_MAJOR_VERSION < 3) || unlikely(__Pyx_GetException(&__pyx_t_6, &__pyx_t_7, &__pyx_t_8) < 0)) __Pyx_ErrFetch(&__pyx_t_6, &__pyx_t_7, &__pyx_t_8);
__Pyx_XGOTREF(__pyx_t_6);
__Pyx_XGOTREF(__pyx_t_7);
__Pyx_XGOTREF(__pyx_t_8);
__Pyx_XGOTREF(__pyx_t_9);
__Pyx_XGOTREF(__pyx_t_10);
__Pyx_XGOTREF(__pyx_t_11);
__pyx_t_3 = __pyx_lineno; __pyx_t_4 = __pyx_clineno; __pyx_t_5 = __pyx_filename;
{
PyMem_Free(__pyx_v_tmp);
}
if (PY_MAJOR_VERSION >= 3) {
__Pyx_XGIVEREF(__pyx_t_9);
__Pyx_XGIVEREF(__pyx_t_10);
__Pyx_XGIVEREF(__pyx_t_11);
__Pyx_ExceptionReset(__pyx_t_9, __pyx_t_10, __pyx_t_11);
}
__Pyx_XGIVEREF(__pyx_t_6);
__Pyx_XGIVEREF(__pyx_t_7);
__Pyx_XGIVEREF(__pyx_t_8);
__Pyx_ErrRestore(__pyx_t_6, __pyx_t_7, __pyx_t_8);
__pyx_t_6 = 0; __pyx_t_7 = 0; __pyx_t_8 = 0; __pyx_t_9 = 0; __pyx_t_10 = 0; __pyx_t_11 = 0;
__pyx_lineno = __pyx_t_3; __pyx_clineno = __pyx_t_4; __pyx_filename = __pyx_t_5;
goto __pyx_L1_error;
}
__pyx_L7:;
}
/* "View.MemoryView":445
* src.ndim, dst.ndim, self.dtype_is_object)
*
* cdef setitem_slice_assign_scalar(self, memoryview dst, value): # <<<<<<<<<<<<<<
* cdef int array[128]
* cdef void *tmp = NULL
*/
/* function exit code */
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_AddTraceback("View.MemoryView.memoryview.setitem_slice_assign_scalar", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
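/* --- Editor's sketch (illustrative, not referenced by the module) ---
 * setitem_slice_assign_scalar above packs the scalar into a stack buffer
 * (cdef int array[128], typically 512 bytes) and heap-allocates only when
 * itemsize is larger; since PyMem_Free(NULL) is a no-op, the finally block
 * can free unconditionally.  The small-buffer pattern in isolation
 * (hypothetical helper; the fill step is elided): */
static int sketch_small_buffer(size_t itemsize)
{
    int stack_buf[128];
    void *heap_buf = NULL;
    void *item;
    if (itemsize > sizeof(stack_buf)) {
        heap_buf = PyMem_Malloc(itemsize);
        if (heap_buf == NULL)
            return -1;              /* caller raises MemoryError */
        item = heap_buf;
    } else {
        item = (void *)stack_buf;   /* common case: no allocation */
    }
    /* ... pack the scalar into item and broadcast it over the slice ... */
    (void)item;
    PyMem_Free(heap_buf);           /* safe: heap_buf may be NULL */
    return 0;
}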
/* "View.MemoryView":477
* PyMem_Free(tmp)
*
* cdef setitem_indexed(self, index, value): # <<<<<<<<<<<<<<
* cdef char *itemp = self.get_item_pointer(index)
* self.assign_item_from_object(itemp, value)
*/
static PyObject *__pyx_memoryview_setitem_indexed(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value) {
char *__pyx_v_itemp;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
char *__pyx_t_1;
PyObject *__pyx_t_2 = NULL;
__Pyx_RefNannySetupContext("setitem_indexed", 0);
/* "View.MemoryView":478
*
* cdef setitem_indexed(self, index, value):
* cdef char *itemp = self.get_item_pointer(index) # <<<<<<<<<<<<<<
* self.assign_item_from_object(itemp, value)
*
*/
__pyx_t_1 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->get_item_pointer(__pyx_v_self, __pyx_v_index); if (unlikely(__pyx_t_1 == ((char *)NULL))) __PYX_ERR(1, 478, __pyx_L1_error)
__pyx_v_itemp = __pyx_t_1;
/* "View.MemoryView":479
* cdef setitem_indexed(self, index, value):
* cdef char *itemp = self.get_item_pointer(index)
* self.assign_item_from_object(itemp, value) # <<<<<<<<<<<<<<
*
* cdef convert_item_to_object(self, char *itemp):
*/
__pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->assign_item_from_object(__pyx_v_self, __pyx_v_itemp, __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 479, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
/* "View.MemoryView":477
* PyMem_Free(tmp)
*
* cdef setitem_indexed(self, index, value): # <<<<<<<<<<<<<<
* cdef char *itemp = self.get_item_pointer(index)
* self.assign_item_from_object(itemp, value)
*/
/* function exit code */
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_AddTraceback("View.MemoryView.memoryview.setitem_indexed", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":481
* self.assign_item_from_object(itemp, value)
*
* cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<<
* """Only used if instantiated manually by the user, or if Cython doesn't
* know how to convert the type"""
*/
static PyObject *__pyx_memoryview_convert_item_to_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp) {
PyObject *__pyx_v_struct = NULL;
PyObject *__pyx_v_bytesitem = 0;
PyObject *__pyx_v_result = NULL;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
PyObject *__pyx_t_6 = NULL;
PyObject *__pyx_t_7 = NULL;
int __pyx_t_8;
PyObject *__pyx_t_9 = NULL;
size_t __pyx_t_10;
int __pyx_t_11;
__Pyx_RefNannySetupContext("convert_item_to_object", 0);
/* "View.MemoryView":484
* """Only used if instantiated manually by the user, or if Cython doesn't
* know how to convert the type"""
* import struct # <<<<<<<<<<<<<<
* cdef bytes bytesitem
*
*/
__pyx_t_1 = __Pyx_Import(__pyx_n_s_struct, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 484, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_v_struct = __pyx_t_1;
__pyx_t_1 = 0;
/* "View.MemoryView":487
* cdef bytes bytesitem
*
* bytesitem = itemp[:self.view.itemsize] # <<<<<<<<<<<<<<
* try:
* result = struct.unpack(self.view.format, bytesitem)
*/
__pyx_t_1 = __Pyx_PyBytes_FromStringAndSize(__pyx_v_itemp + 0, __pyx_v_self->view.itemsize - 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 487, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_v_bytesitem = ((PyObject*)__pyx_t_1);
__pyx_t_1 = 0;
/* "View.MemoryView":488
*
* bytesitem = itemp[:self.view.itemsize]
* try: # <<<<<<<<<<<<<<
* result = struct.unpack(self.view.format, bytesitem)
* except struct.error:
*/
{
__Pyx_PyThreadState_declare
__Pyx_PyThreadState_assign
__Pyx_ExceptionSave(&__pyx_t_2, &__pyx_t_3, &__pyx_t_4);
__Pyx_XGOTREF(__pyx_t_2);
__Pyx_XGOTREF(__pyx_t_3);
__Pyx_XGOTREF(__pyx_t_4);
/*try:*/ {
/* "View.MemoryView":489
* bytesitem = itemp[:self.view.itemsize]
* try:
* result = struct.unpack(self.view.format, bytesitem) # <<<<<<<<<<<<<<
* except struct.error:
* raise ValueError("Unable to convert item to object")
*/
__pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_unpack); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 489, __pyx_L3_error)
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_6 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 489, __pyx_L3_error)
__Pyx_GOTREF(__pyx_t_6);
__pyx_t_7 = NULL;
__pyx_t_8 = 0;
if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_5))) {
__pyx_t_7 = PyMethod_GET_SELF(__pyx_t_5);
if (likely(__pyx_t_7)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5);
__Pyx_INCREF(__pyx_t_7);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_5, function);
__pyx_t_8 = 1;
}
}
#if CYTHON_FAST_PYCALL
if (PyFunction_Check(__pyx_t_5)) {
PyObject *__pyx_temp[3] = {__pyx_t_7, __pyx_t_6, __pyx_v_bytesitem};
__pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_5, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 489, __pyx_L3_error)
__Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0;
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
} else
#endif
#if CYTHON_FAST_PYCCALL
if (__Pyx_PyFastCFunction_Check(__pyx_t_5)) {
PyObject *__pyx_temp[3] = {__pyx_t_7, __pyx_t_6, __pyx_v_bytesitem};
__pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_5, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 489, __pyx_L3_error)
__Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0;
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
} else
#endif
{
__pyx_t_9 = PyTuple_New(2+__pyx_t_8); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 489, __pyx_L3_error)
__Pyx_GOTREF(__pyx_t_9);
if (__pyx_t_7) {
__Pyx_GIVEREF(__pyx_t_7); PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_7); __pyx_t_7 = NULL;
}
__Pyx_GIVEREF(__pyx_t_6);
PyTuple_SET_ITEM(__pyx_t_9, 0+__pyx_t_8, __pyx_t_6);
__Pyx_INCREF(__pyx_v_bytesitem);
__Pyx_GIVEREF(__pyx_v_bytesitem);
PyTuple_SET_ITEM(__pyx_t_9, 1+__pyx_t_8, __pyx_v_bytesitem);
__pyx_t_6 = 0;
__pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_5, __pyx_t_9, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 489, __pyx_L3_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
}
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__pyx_v_result = __pyx_t_1;
__pyx_t_1 = 0;
/* "View.MemoryView":488
*
* bytesitem = itemp[:self.view.itemsize]
* try: # <<<<<<<<<<<<<<
* result = struct.unpack(self.view.format, bytesitem)
* except struct.error:
*/
}
/* "View.MemoryView":493
* raise ValueError("Unable to convert item to object")
* else:
* if len(self.view.format) == 1: # <<<<<<<<<<<<<<
* return result[0]
* return result
*/
/*else:*/ {
__pyx_t_10 = strlen(__pyx_v_self->view.format);
__pyx_t_11 = ((__pyx_t_10 == 1) != 0);
if (__pyx_t_11) {
/* "View.MemoryView":494
* else:
* if len(self.view.format) == 1:
* return result[0] # <<<<<<<<<<<<<<
* return result
*
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = __Pyx_GetItemInt(__pyx_v_result, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 494, __pyx_L5_except_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L6_except_return;
/* "View.MemoryView":493
* raise ValueError("Unable to convert item to object")
* else:
* if len(self.view.format) == 1: # <<<<<<<<<<<<<<
* return result[0]
* return result
*/
}
/* "View.MemoryView":495
* if len(self.view.format) == 1:
* return result[0]
* return result # <<<<<<<<<<<<<<
*
* cdef assign_item_from_object(self, char *itemp, object value):
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(__pyx_v_result);
__pyx_r = __pyx_v_result;
goto __pyx_L6_except_return;
}
__pyx_L3_error:;
__Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
__Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0;
__Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0;
__Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0;
/* "View.MemoryView":490
* try:
* result = struct.unpack(self.view.format, bytesitem)
* except struct.error: # <<<<<<<<<<<<<<
* raise ValueError("Unable to convert item to object")
* else:
*/
__Pyx_ErrFetch(&__pyx_t_1, &__pyx_t_5, &__pyx_t_9);
__pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_error); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 490, __pyx_L5_except_error)
__Pyx_GOTREF(__pyx_t_6);
__pyx_t_8 = __Pyx_PyErr_GivenExceptionMatches(__pyx_t_1, __pyx_t_6);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__Pyx_ErrRestore(__pyx_t_1, __pyx_t_5, __pyx_t_9);
__pyx_t_1 = 0; __pyx_t_5 = 0; __pyx_t_9 = 0;
if (__pyx_t_8) {
__Pyx_AddTraceback("View.MemoryView.memoryview.convert_item_to_object", __pyx_clineno, __pyx_lineno, __pyx_filename);
if (__Pyx_GetException(&__pyx_t_9, &__pyx_t_5, &__pyx_t_1) < 0) __PYX_ERR(1, 490, __pyx_L5_except_error)
__Pyx_GOTREF(__pyx_t_9);
__Pyx_GOTREF(__pyx_t_5);
__Pyx_GOTREF(__pyx_t_1);
/* "View.MemoryView":491
* result = struct.unpack(self.view.format, bytesitem)
* except struct.error:
* raise ValueError("Unable to convert item to object") # <<<<<<<<<<<<<<
* else:
* if len(self.view.format) == 1:
*/
__pyx_t_6 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__9, NULL); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 491, __pyx_L5_except_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_Raise(__pyx_t_6, 0, 0, 0);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__PYX_ERR(1, 491, __pyx_L5_except_error)
}
goto __pyx_L5_except_error;
__pyx_L5_except_error:;
/* "View.MemoryView":488
*
* bytesitem = itemp[:self.view.itemsize]
* try: # <<<<<<<<<<<<<<
* result = struct.unpack(self.view.format, bytesitem)
* except struct.error:
*/
__Pyx_XGIVEREF(__pyx_t_2);
__Pyx_XGIVEREF(__pyx_t_3);
__Pyx_XGIVEREF(__pyx_t_4);
__Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4);
goto __pyx_L1_error;
__pyx_L6_except_return:;
__Pyx_XGIVEREF(__pyx_t_2);
__Pyx_XGIVEREF(__pyx_t_3);
__Pyx_XGIVEREF(__pyx_t_4);
__Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4);
goto __pyx_L0;
}
/* "View.MemoryView":481
* self.assign_item_from_object(itemp, value)
*
* cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<<
* """Only used if instantiated manually by the user, or if Cython doesn't
* know how to convert the type"""
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_XDECREF(__pyx_t_6);
__Pyx_XDECREF(__pyx_t_7);
__Pyx_XDECREF(__pyx_t_9);
__Pyx_AddTraceback("View.MemoryView.memoryview.convert_item_to_object", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_struct);
__Pyx_XDECREF(__pyx_v_bytesitem);
__Pyx_XDECREF(__pyx_v_result);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
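/* --- Editor's sketch (illustrative, not referenced by the module) ---
 * convert_item_to_object above is the slow path used when Cython cannot
 * convert the item natively: it copies itemsize raw bytes into a Python
 * bytes object and hands them to struct.unpack with the buffer's format
 * string; single-character formats are unwrapped from the result tuple.
 * A bare-bones C equivalent of the unpack step (error handling reduced;
 * "structmod" is a hypothetical borrowed reference to the struct module): */
static PyObject *sketch_unpack_item(PyObject *structmod, const char *itemp,
                                    Py_ssize_t itemsize, const char *format)
{
    PyObject *bytesitem = PyBytes_FromStringAndSize(itemp, itemsize);
    PyObject *result = NULL;
    if (bytesitem != NULL) {
        result = PyObject_CallMethod(structmod, "unpack", "sO",
                                     format, bytesitem);
        Py_DECREF(bytesitem);
    }
    return result;  /* a tuple, or NULL with the error pending */
}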
/* "View.MemoryView":497
* return result
*
* cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<<
* """Only used if instantiated manually by the user, or if Cython doesn't
* know how to convert the type"""
*/
static PyObject *__pyx_memoryview_assign_item_from_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value) {
PyObject *__pyx_v_struct = NULL;
char __pyx_v_c;
PyObject *__pyx_v_bytesvalue = 0;
Py_ssize_t __pyx_v_i;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_t_2;
int __pyx_t_3;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
PyObject *__pyx_t_6 = NULL;
int __pyx_t_7;
PyObject *__pyx_t_8 = NULL;
Py_ssize_t __pyx_t_9;
PyObject *__pyx_t_10 = NULL;
char *__pyx_t_11;
char *__pyx_t_12;
char *__pyx_t_13;
char *__pyx_t_14;
__Pyx_RefNannySetupContext("assign_item_from_object", 0);
/* "View.MemoryView":500
* """Only used if instantiated manually by the user, or if Cython doesn't
* know how to convert the type"""
* import struct # <<<<<<<<<<<<<<
* cdef char c
* cdef bytes bytesvalue
*/
__pyx_t_1 = __Pyx_Import(__pyx_n_s_struct, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 500, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_v_struct = __pyx_t_1;
__pyx_t_1 = 0;
/* "View.MemoryView":505
* cdef Py_ssize_t i
*
* if isinstance(value, tuple): # <<<<<<<<<<<<<<
* bytesvalue = struct.pack(self.view.format, *value)
* else:
*/
__pyx_t_2 = PyTuple_Check(__pyx_v_value);
__pyx_t_3 = (__pyx_t_2 != 0);
if (__pyx_t_3) {
/* "View.MemoryView":506
*
* if isinstance(value, tuple):
* bytesvalue = struct.pack(self.view.format, *value) # <<<<<<<<<<<<<<
* else:
* bytesvalue = struct.pack(self.view.format, value)
*/
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_pack); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 506, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_4 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 506, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 506, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_GIVEREF(__pyx_t_4);
PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_4);
__pyx_t_4 = 0;
__pyx_t_4 = __Pyx_PySequence_Tuple(__pyx_v_value); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 506, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_6 = PyNumber_Add(__pyx_t_5, __pyx_t_4); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 506, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_t_6, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 506, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
if (!(likely(PyBytes_CheckExact(__pyx_t_4))||((__pyx_t_4) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "bytes", Py_TYPE(__pyx_t_4)->tp_name), 0))) __PYX_ERR(1, 506, __pyx_L1_error)
__pyx_v_bytesvalue = ((PyObject*)__pyx_t_4);
__pyx_t_4 = 0;
/* "View.MemoryView":505
* cdef Py_ssize_t i
*
* if isinstance(value, tuple): # <<<<<<<<<<<<<<
* bytesvalue = struct.pack(self.view.format, *value)
* else:
*/
goto __pyx_L3;
}
/* "View.MemoryView":508
* bytesvalue = struct.pack(self.view.format, *value)
* else:
* bytesvalue = struct.pack(self.view.format, value) # <<<<<<<<<<<<<<
*
* for i, c in enumerate(bytesvalue):
*/
/*else*/ {
__pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_pack); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 508, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__pyx_t_1 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 508, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_5 = NULL;
__pyx_t_7 = 0;
if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_6))) {
__pyx_t_5 = PyMethod_GET_SELF(__pyx_t_6);
if (likely(__pyx_t_5)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_6);
__Pyx_INCREF(__pyx_t_5);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_6, function);
__pyx_t_7 = 1;
}
}
#if CYTHON_FAST_PYCALL
if (PyFunction_Check(__pyx_t_6)) {
PyObject *__pyx_temp[3] = {__pyx_t_5, __pyx_t_1, __pyx_v_value};
__pyx_t_4 = __Pyx_PyFunction_FastCall(__pyx_t_6, __pyx_temp+1-__pyx_t_7, 2+__pyx_t_7); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 508, __pyx_L1_error)
__Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
} else
#endif
#if CYTHON_FAST_PYCCALL
if (__Pyx_PyFastCFunction_Check(__pyx_t_6)) {
PyObject *__pyx_temp[3] = {__pyx_t_5, __pyx_t_1, __pyx_v_value};
__pyx_t_4 = __Pyx_PyCFunction_FastCall(__pyx_t_6, __pyx_temp+1-__pyx_t_7, 2+__pyx_t_7); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 508, __pyx_L1_error)
__Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
} else
#endif
{
__pyx_t_8 = PyTuple_New(2+__pyx_t_7); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 508, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_8);
if (__pyx_t_5) {
__Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_t_5); __pyx_t_5 = NULL;
}
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_8, 0+__pyx_t_7, __pyx_t_1);
__Pyx_INCREF(__pyx_v_value);
__Pyx_GIVEREF(__pyx_v_value);
PyTuple_SET_ITEM(__pyx_t_8, 1+__pyx_t_7, __pyx_v_value);
__pyx_t_1 = 0;
__pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_6, __pyx_t_8, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 508, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
}
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
if (!(likely(PyBytes_CheckExact(__pyx_t_4))||((__pyx_t_4) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "bytes", Py_TYPE(__pyx_t_4)->tp_name), 0))) __PYX_ERR(1, 508, __pyx_L1_error)
__pyx_v_bytesvalue = ((PyObject*)__pyx_t_4);
__pyx_t_4 = 0;
}
__pyx_L3:;
/* "View.MemoryView":510
* bytesvalue = struct.pack(self.view.format, value)
*
* for i, c in enumerate(bytesvalue): # <<<<<<<<<<<<<<
* itemp[i] = c
*
*/
__pyx_t_9 = 0;
if (unlikely(__pyx_v_bytesvalue == Py_None)) {
PyErr_SetString(PyExc_TypeError, "'NoneType' is not iterable");
__PYX_ERR(1, 510, __pyx_L1_error)
}
__Pyx_INCREF(__pyx_v_bytesvalue);
__pyx_t_10 = __pyx_v_bytesvalue;
__pyx_t_12 = PyBytes_AS_STRING(__pyx_t_10);
__pyx_t_13 = (__pyx_t_12 + PyBytes_GET_SIZE(__pyx_t_10));
for (__pyx_t_14 = __pyx_t_12; __pyx_t_14 < __pyx_t_13; __pyx_t_14++) {
__pyx_t_11 = __pyx_t_14;
__pyx_v_c = (__pyx_t_11[0]);
/* "View.MemoryView":511
*
* for i, c in enumerate(bytesvalue):
* itemp[i] = c # <<<<<<<<<<<<<<
*
* @cname('getbuffer')
*/
__pyx_v_i = __pyx_t_9;
/* "View.MemoryView":510
* bytesvalue = struct.pack(self.view.format, value)
*
* for i, c in enumerate(bytesvalue): # <<<<<<<<<<<<<<
* itemp[i] = c
*
*/
__pyx_t_9 = (__pyx_t_9 + 1);
/* "View.MemoryView":511
*
* for i, c in enumerate(bytesvalue):
* itemp[i] = c # <<<<<<<<<<<<<<
*
* @cname('getbuffer')
*/
(__pyx_v_itemp[__pyx_v_i]) = __pyx_v_c;
}
__Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
/* "View.MemoryView":497
* return result
*
* cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<<
* """Only used if instantiated manually by the user, or if Cython doesn't
* know how to convert the type"""
*/
/* function exit code */
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_XDECREF(__pyx_t_6);
__Pyx_XDECREF(__pyx_t_8);
__Pyx_XDECREF(__pyx_t_10);
__Pyx_AddTraceback("View.MemoryView.memoryview.assign_item_from_object", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_struct);
__Pyx_XDECREF(__pyx_v_bytesvalue);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
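/* Note on assign_item_from_object: this is the generic fallback for item
 * assignment. It lazily imports the struct module, packs the Python value
 * into a bytes object using the buffer's format string (tuple values are
 * splatted, scalars passed directly), and copies the packed bytes into the
 * destination item pointer one char at a time. Illustrative Cython-level
 * behaviour (a sketch, assuming a view with format "d"):
 *
 *     bytesvalue = struct.pack("d", 3.14)   # 8 packed bytes
 *     for i, c in enumerate(bytesvalue):
 *         itemp[i] = c                      # written into the buffer
 */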
/* "View.MemoryView":514
*
* @cname('getbuffer')
* def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<<
* if flags & PyBUF_WRITABLE and self.view.readonly:
* raise ValueError("Cannot create writable memory view from read-only memoryview")
*/
/* Python wrapper */
static CYTHON_UNUSED int __pyx_memoryview_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/
static CYTHON_UNUSED int __pyx_memoryview_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) {
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0);
__pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_8__getbuffer__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((Py_buffer *)__pyx_v_info), ((int)__pyx_v_flags));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_8__getbuffer__(struct __pyx_memoryview_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) {
int __pyx_r;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
Py_ssize_t *__pyx_t_4;
char *__pyx_t_5;
void *__pyx_t_6;
int __pyx_t_7;
Py_ssize_t __pyx_t_8;
if (__pyx_v_info == NULL) {
PyErr_SetString(PyExc_BufferError, "PyObject_GetBuffer: view==NULL argument is obsolete");
return -1;
}
__Pyx_RefNannySetupContext("__getbuffer__", 0);
__pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None);
__Pyx_GIVEREF(__pyx_v_info->obj);
/* "View.MemoryView":515
* @cname('getbuffer')
* def __getbuffer__(self, Py_buffer *info, int flags):
* if flags & PyBUF_WRITABLE and self.view.readonly: # <<<<<<<<<<<<<<
* raise ValueError("Cannot create writable memory view from read-only memoryview")
*
*/
__pyx_t_2 = ((__pyx_v_flags & PyBUF_WRITABLE) != 0);
if (__pyx_t_2) {
} else {
__pyx_t_1 = __pyx_t_2;
goto __pyx_L4_bool_binop_done;
}
__pyx_t_2 = (__pyx_v_self->view.readonly != 0);
__pyx_t_1 = __pyx_t_2;
__pyx_L4_bool_binop_done:;
if (unlikely(__pyx_t_1)) {
/* "View.MemoryView":516
* def __getbuffer__(self, Py_buffer *info, int flags):
* if flags & PyBUF_WRITABLE and self.view.readonly:
* raise ValueError("Cannot create writable memory view from read-only memoryview") # <<<<<<<<<<<<<<
*
* if flags & PyBUF_ND:
*/
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__10, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 516, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__PYX_ERR(1, 516, __pyx_L1_error)
/* "View.MemoryView":515
* @cname('getbuffer')
* def __getbuffer__(self, Py_buffer *info, int flags):
* if flags & PyBUF_WRITABLE and self.view.readonly: # <<<<<<<<<<<<<<
* raise ValueError("Cannot create writable memory view from read-only memoryview")
*
*/
}
/* "View.MemoryView":518
* raise ValueError("Cannot create writable memory view from read-only memoryview")
*
* if flags & PyBUF_ND: # <<<<<<<<<<<<<<
* info.shape = self.view.shape
* else:
*/
__pyx_t_1 = ((__pyx_v_flags & PyBUF_ND) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":519
*
* if flags & PyBUF_ND:
* info.shape = self.view.shape # <<<<<<<<<<<<<<
* else:
* info.shape = NULL
*/
__pyx_t_4 = __pyx_v_self->view.shape;
__pyx_v_info->shape = __pyx_t_4;
/* "View.MemoryView":518
* raise ValueError("Cannot create writable memory view from read-only memoryview")
*
* if flags & PyBUF_ND: # <<<<<<<<<<<<<<
* info.shape = self.view.shape
* else:
*/
goto __pyx_L6;
}
/* "View.MemoryView":521
* info.shape = self.view.shape
* else:
* info.shape = NULL # <<<<<<<<<<<<<<
*
* if flags & PyBUF_STRIDES:
*/
/*else*/ {
__pyx_v_info->shape = NULL;
}
__pyx_L6:;
/* "View.MemoryView":523
* info.shape = NULL
*
* if flags & PyBUF_STRIDES: # <<<<<<<<<<<<<<
* info.strides = self.view.strides
* else:
*/
__pyx_t_1 = ((__pyx_v_flags & PyBUF_STRIDES) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":524
*
* if flags & PyBUF_STRIDES:
* info.strides = self.view.strides # <<<<<<<<<<<<<<
* else:
* info.strides = NULL
*/
__pyx_t_4 = __pyx_v_self->view.strides;
__pyx_v_info->strides = __pyx_t_4;
/* "View.MemoryView":523
* info.shape = NULL
*
* if flags & PyBUF_STRIDES: # <<<<<<<<<<<<<<
* info.strides = self.view.strides
* else:
*/
goto __pyx_L7;
}
/* "View.MemoryView":526
* info.strides = self.view.strides
* else:
* info.strides = NULL # <<<<<<<<<<<<<<
*
* if flags & PyBUF_INDIRECT:
*/
/*else*/ {
__pyx_v_info->strides = NULL;
}
__pyx_L7:;
/* "View.MemoryView":528
* info.strides = NULL
*
* if flags & PyBUF_INDIRECT: # <<<<<<<<<<<<<<
* info.suboffsets = self.view.suboffsets
* else:
*/
__pyx_t_1 = ((__pyx_v_flags & PyBUF_INDIRECT) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":529
*
* if flags & PyBUF_INDIRECT:
* info.suboffsets = self.view.suboffsets # <<<<<<<<<<<<<<
* else:
* info.suboffsets = NULL
*/
__pyx_t_4 = __pyx_v_self->view.suboffsets;
__pyx_v_info->suboffsets = __pyx_t_4;
/* "View.MemoryView":528
* info.strides = NULL
*
* if flags & PyBUF_INDIRECT: # <<<<<<<<<<<<<<
* info.suboffsets = self.view.suboffsets
* else:
*/
goto __pyx_L8;
}
/* "View.MemoryView":531
* info.suboffsets = self.view.suboffsets
* else:
* info.suboffsets = NULL # <<<<<<<<<<<<<<
*
* if flags & PyBUF_FORMAT:
*/
/*else*/ {
__pyx_v_info->suboffsets = NULL;
}
__pyx_L8:;
/* "View.MemoryView":533
* info.suboffsets = NULL
*
* if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<<
* info.format = self.view.format
* else:
*/
__pyx_t_1 = ((__pyx_v_flags & PyBUF_FORMAT) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":534
*
* if flags & PyBUF_FORMAT:
* info.format = self.view.format # <<<<<<<<<<<<<<
* else:
* info.format = NULL
*/
__pyx_t_5 = __pyx_v_self->view.format;
__pyx_v_info->format = __pyx_t_5;
/* "View.MemoryView":533
* info.suboffsets = NULL
*
* if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<<
* info.format = self.view.format
* else:
*/
goto __pyx_L9;
}
/* "View.MemoryView":536
* info.format = self.view.format
* else:
* info.format = NULL # <<<<<<<<<<<<<<
*
* info.buf = self.view.buf
*/
/*else*/ {
__pyx_v_info->format = NULL;
}
__pyx_L9:;
/* "View.MemoryView":538
* info.format = NULL
*
* info.buf = self.view.buf # <<<<<<<<<<<<<<
* info.ndim = self.view.ndim
* info.itemsize = self.view.itemsize
*/
__pyx_t_6 = __pyx_v_self->view.buf;
__pyx_v_info->buf = __pyx_t_6;
/* "View.MemoryView":539
*
* info.buf = self.view.buf
* info.ndim = self.view.ndim # <<<<<<<<<<<<<<
* info.itemsize = self.view.itemsize
* info.len = self.view.len
*/
__pyx_t_7 = __pyx_v_self->view.ndim;
__pyx_v_info->ndim = __pyx_t_7;
/* "View.MemoryView":540
* info.buf = self.view.buf
* info.ndim = self.view.ndim
* info.itemsize = self.view.itemsize # <<<<<<<<<<<<<<
* info.len = self.view.len
* info.readonly = self.view.readonly
*/
__pyx_t_8 = __pyx_v_self->view.itemsize;
__pyx_v_info->itemsize = __pyx_t_8;
/* "View.MemoryView":541
* info.ndim = self.view.ndim
* info.itemsize = self.view.itemsize
* info.len = self.view.len # <<<<<<<<<<<<<<
* info.readonly = self.view.readonly
* info.obj = self
*/
__pyx_t_8 = __pyx_v_self->view.len;
__pyx_v_info->len = __pyx_t_8;
/* "View.MemoryView":542
* info.itemsize = self.view.itemsize
* info.len = self.view.len
* info.readonly = self.view.readonly # <<<<<<<<<<<<<<
* info.obj = self
*
*/
__pyx_t_1 = __pyx_v_self->view.readonly;
__pyx_v_info->readonly = __pyx_t_1;
/* "View.MemoryView":543
* info.len = self.view.len
* info.readonly = self.view.readonly
* info.obj = self # <<<<<<<<<<<<<<
*
* __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)")
*/
__Pyx_INCREF(((PyObject *)__pyx_v_self));
__Pyx_GIVEREF(((PyObject *)__pyx_v_self));
__Pyx_GOTREF(__pyx_v_info->obj);
__Pyx_DECREF(__pyx_v_info->obj);
__pyx_v_info->obj = ((PyObject *)__pyx_v_self);
/* "View.MemoryView":514
*
* @cname('getbuffer')
* def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<<
* if flags & PyBUF_WRITABLE and self.view.readonly:
* raise ValueError("Cannot create writable memory view from read-only memoryview")
*/
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView.memoryview.__getbuffer__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
if (__pyx_v_info->obj != NULL) {
__Pyx_GOTREF(__pyx_v_info->obj);
__Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0;
}
goto __pyx_L2;
__pyx_L0:;
if (__pyx_v_info->obj == Py_None) {
__Pyx_GOTREF(__pyx_v_info->obj);
__Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0;
}
__pyx_L2:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
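/* __getbuffer__ implements the PEP 3118 buffer protocol for this
 * memoryview wrapper: each request flag (PyBUF_ND, PyBUF_STRIDES,
 * PyBUF_INDIRECT, PyBUF_FORMAT) gates whether the corresponding field of
 * the underlying Py_buffer is re-exported or NULLed out, a writable
 * request against a read-only view raises ValueError, and info->obj takes
 * a new reference to self so the exporter outlives the consumer. A
 * minimal C-level consumer might look like this (a sketch, assuming `mv`
 * is any object exporting the protocol):
 *
 *     Py_buffer view;
 *     if (PyObject_GetBuffer(mv, &view, PyBUF_STRIDES | PyBUF_FORMAT) == 0) {
 *         // view.buf / view.shape / view.strides are valid here
 *         PyBuffer_Release(&view);
 *     }
 */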
/* "View.MemoryView":549
*
* @property
* def T(self): # <<<<<<<<<<<<<<
* cdef _memoryviewslice result = memoryview_copy(self)
* transpose_memslice(&result.from_slice)
*/
/* Python wrapper */
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_1T_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_1T_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_1T___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_1T___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
struct __pyx_memoryviewslice_obj *__pyx_v_result = 0;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_t_2;
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":550
* @property
* def T(self):
* cdef _memoryviewslice result = memoryview_copy(self) # <<<<<<<<<<<<<<
* transpose_memslice(&result.from_slice)
* return result
*/
__pyx_t_1 = __pyx_memoryview_copy_object(__pyx_v_self); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 550, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_memoryviewslice_type))))) __PYX_ERR(1, 550, __pyx_L1_error)
__pyx_v_result = ((struct __pyx_memoryviewslice_obj *)__pyx_t_1);
__pyx_t_1 = 0;
/* "View.MemoryView":551
* def T(self):
* cdef _memoryviewslice result = memoryview_copy(self)
* transpose_memslice(&result.from_slice) # <<<<<<<<<<<<<<
* return result
*
*/
__pyx_t_2 = __pyx_memslice_transpose((&__pyx_v_result->from_slice)); if (unlikely(__pyx_t_2 == ((int)0))) __PYX_ERR(1, 551, __pyx_L1_error)
/* "View.MemoryView":552
* cdef _memoryviewslice result = memoryview_copy(self)
* transpose_memslice(&result.from_slice)
* return result # <<<<<<<<<<<<<<
*
* @property
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(((PyObject *)__pyx_v_result));
__pyx_r = ((PyObject *)__pyx_v_result);
goto __pyx_L0;
/* "View.MemoryView":549
*
* @property
* def T(self): # <<<<<<<<<<<<<<
* cdef _memoryviewslice result = memoryview_copy(self)
* transpose_memslice(&result.from_slice)
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.memoryview.T.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XDECREF((PyObject *)__pyx_v_result);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
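/* The T property produces a transposed view: memoryview_copy duplicates
 * the Python-level memoryview object, then __pyx_memslice_transpose
 * reverses the shape/strides arrays of the new slice in place (it returns
 * 0 on failure, hence the `== 0` error check above). The underlying data
 * buffer is shared with the original, not copied. */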
/* "View.MemoryView":555
*
* @property
* def base(self): # <<<<<<<<<<<<<<
* return self.obj
*
*/
/* Python wrapper */
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4base_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4base_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_4base___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4base___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":556
* @property
* def base(self):
* return self.obj # <<<<<<<<<<<<<<
*
* @property
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(__pyx_v_self->obj);
__pyx_r = __pyx_v_self->obj;
goto __pyx_L0;
/* "View.MemoryView":555
*
* @property
* def base(self): # <<<<<<<<<<<<<<
* return self.obj
*
*/
/* function exit code */
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":559
*
* @property
* def shape(self): # <<<<<<<<<<<<<<
* return tuple([length for length in self.view.shape[:self.view.ndim]])
*
*/
/* Python wrapper */
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_5shape_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_5shape_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_5shape___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_5shape___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
Py_ssize_t __pyx_v_length;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
Py_ssize_t *__pyx_t_2;
Py_ssize_t *__pyx_t_3;
Py_ssize_t *__pyx_t_4;
PyObject *__pyx_t_5 = NULL;
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":560
* @property
* def shape(self):
* return tuple([length for length in self.view.shape[:self.view.ndim]]) # <<<<<<<<<<<<<<
*
* @property
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = PyList_New(0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 560, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_3 = (__pyx_v_self->view.shape + __pyx_v_self->view.ndim);
for (__pyx_t_4 = __pyx_v_self->view.shape; __pyx_t_4 < __pyx_t_3; __pyx_t_4++) {
__pyx_t_2 = __pyx_t_4;
__pyx_v_length = (__pyx_t_2[0]);
__pyx_t_5 = PyInt_FromSsize_t(__pyx_v_length); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 560, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
if (unlikely(__Pyx_ListComp_Append(__pyx_t_1, (PyObject*)__pyx_t_5))) __PYX_ERR(1, 560, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
}
__pyx_t_5 = PyList_AsTuple(((PyObject*)__pyx_t_1)); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 560, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_r = __pyx_t_5;
__pyx_t_5 = 0;
goto __pyx_L0;
/* "View.MemoryView":559
*
* @property
* def shape(self): # <<<<<<<<<<<<<<
* return tuple([length for length in self.view.shape[:self.view.ndim]])
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_AddTraceback("View.MemoryView.memoryview.shape.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
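/* shape.__get__: the Cython list comprehension over
 * self.view.shape[:self.view.ndim] is lowered to a raw pointer walk over
 * the Py_ssize_t shape array; each extent is boxed with
 * PyInt_FromSsize_t, appended to a temporary list, and the list is
 * finally converted to a tuple with PyList_AsTuple. The strides and
 * suboffsets properties below follow the same pointer-walk pattern. */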
/* "View.MemoryView":563
*
* @property
* def strides(self): # <<<<<<<<<<<<<<
* if self.view.strides == NULL:
*
*/
/* Python wrapper */
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_7strides_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_7strides_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_7strides___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_7strides___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
Py_ssize_t __pyx_v_stride;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
Py_ssize_t *__pyx_t_3;
Py_ssize_t *__pyx_t_4;
Py_ssize_t *__pyx_t_5;
PyObject *__pyx_t_6 = NULL;
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":564
* @property
* def strides(self):
* if self.view.strides == NULL: # <<<<<<<<<<<<<<
*
* raise ValueError("Buffer view does not expose strides")
*/
__pyx_t_1 = ((__pyx_v_self->view.strides == NULL) != 0);
if (unlikely(__pyx_t_1)) {
/* "View.MemoryView":566
* if self.view.strides == NULL:
*
* raise ValueError("Buffer view does not expose strides") # <<<<<<<<<<<<<<
*
* return tuple([stride for stride in self.view.strides[:self.view.ndim]])
*/
__pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__11, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 566, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_Raise(__pyx_t_2, 0, 0, 0);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__PYX_ERR(1, 566, __pyx_L1_error)
/* "View.MemoryView":564
* @property
* def strides(self):
* if self.view.strides == NULL: # <<<<<<<<<<<<<<
*
* raise ValueError("Buffer view does not expose strides")
*/
}
/* "View.MemoryView":568
* raise ValueError("Buffer view does not expose strides")
*
* return tuple([stride for stride in self.view.strides[:self.view.ndim]]) # <<<<<<<<<<<<<<
*
* @property
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_2 = PyList_New(0); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 568, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_4 = (__pyx_v_self->view.strides + __pyx_v_self->view.ndim);
for (__pyx_t_5 = __pyx_v_self->view.strides; __pyx_t_5 < __pyx_t_4; __pyx_t_5++) {
__pyx_t_3 = __pyx_t_5;
__pyx_v_stride = (__pyx_t_3[0]);
__pyx_t_6 = PyInt_FromSsize_t(__pyx_v_stride); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 568, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
if (unlikely(__Pyx_ListComp_Append(__pyx_t_2, (PyObject*)__pyx_t_6))) __PYX_ERR(1, 568, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
}
__pyx_t_6 = PyList_AsTuple(((PyObject*)__pyx_t_2)); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 568, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_r = __pyx_t_6;
__pyx_t_6 = 0;
goto __pyx_L0;
/* "View.MemoryView":563
*
* @property
* def strides(self): # <<<<<<<<<<<<<<
* if self.view.strides == NULL:
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_6);
__Pyx_AddTraceback("View.MemoryView.memoryview.strides.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
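/* strides.__get__ mirrors shape.__get__, except that a NULL strides
 * pointer is possible when the producer did not honour PyBUF_STRIDES, so
 * that case is reported as a ValueError rather than an empty tuple. */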
/* "View.MemoryView":571
*
* @property
* def suboffsets(self): # <<<<<<<<<<<<<<
* if self.view.suboffsets == NULL:
* return (-1,) * self.view.ndim
*/
/* Python wrapper */
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_10suboffsets_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_10suboffsets_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_10suboffsets___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_10suboffsets___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
Py_ssize_t __pyx_v_suboffset;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
Py_ssize_t *__pyx_t_4;
Py_ssize_t *__pyx_t_5;
Py_ssize_t *__pyx_t_6;
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":572
* @property
* def suboffsets(self):
* if self.view.suboffsets == NULL: # <<<<<<<<<<<<<<
* return (-1,) * self.view.ndim
*
*/
__pyx_t_1 = ((__pyx_v_self->view.suboffsets == NULL) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":573
* def suboffsets(self):
* if self.view.suboffsets == NULL:
* return (-1,) * self.view.ndim # <<<<<<<<<<<<<<
*
* return tuple([suboffset for suboffset in self.view.suboffsets[:self.view.ndim]])
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_2 = __Pyx_PyInt_From_int(__pyx_v_self->view.ndim); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 573, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = PyNumber_Multiply(__pyx_tuple__12, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 573, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_r = __pyx_t_3;
__pyx_t_3 = 0;
goto __pyx_L0;
/* "View.MemoryView":572
* @property
* def suboffsets(self):
* if self.view.suboffsets == NULL: # <<<<<<<<<<<<<<
* return (-1,) * self.view.ndim
*
*/
}
/* "View.MemoryView":575
* return (-1,) * self.view.ndim
*
* return tuple([suboffset for suboffset in self.view.suboffsets[:self.view.ndim]]) # <<<<<<<<<<<<<<
*
* @property
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_3 = PyList_New(0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 575, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_5 = (__pyx_v_self->view.suboffsets + __pyx_v_self->view.ndim);
for (__pyx_t_6 = __pyx_v_self->view.suboffsets; __pyx_t_6 < __pyx_t_5; __pyx_t_6++) {
__pyx_t_4 = __pyx_t_6;
__pyx_v_suboffset = (__pyx_t_4[0]);
__pyx_t_2 = PyInt_FromSsize_t(__pyx_v_suboffset); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 575, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
if (unlikely(__Pyx_ListComp_Append(__pyx_t_3, (PyObject*)__pyx_t_2))) __PYX_ERR(1, 575, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
}
__pyx_t_2 = PyList_AsTuple(((PyObject*)__pyx_t_3)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 575, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
/* "View.MemoryView":571
*
* @property
* def suboffsets(self): # <<<<<<<<<<<<<<
* if self.view.suboffsets == NULL:
* return (-1,) * self.view.ndim
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView.memoryview.suboffsets.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
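/* suboffsets.__get__: when the buffer has no indirection table
 * (suboffsets == NULL), the property synthesises the conventional
 * (-1,) * ndim tuple -- -1 being the PEP 3118 marker for "no suboffset in
 * this dimension" -- instead of raising like strides does. */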
/* "View.MemoryView":578
*
* @property
* def ndim(self): # <<<<<<<<<<<<<<
* return self.view.ndim
*
*/
/* Python wrapper */
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4ndim_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4ndim_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_4ndim___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4ndim___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":579
* @property
* def ndim(self):
* return self.view.ndim # <<<<<<<<<<<<<<
*
* @property
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_self->view.ndim); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 579, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "View.MemoryView":578
*
* @property
* def ndim(self): # <<<<<<<<<<<<<<
* return self.view.ndim
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.memoryview.ndim.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":582
*
* @property
* def itemsize(self): # <<<<<<<<<<<<<<
* return self.view.itemsize
*
*/
/* Python wrapper */
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_8itemsize_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_8itemsize_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_8itemsize___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_8itemsize___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":583
* @property
* def itemsize(self):
* return self.view.itemsize # <<<<<<<<<<<<<<
*
* @property
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = PyInt_FromSsize_t(__pyx_v_self->view.itemsize); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 583, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "View.MemoryView":582
*
* @property
* def itemsize(self): # <<<<<<<<<<<<<<
* return self.view.itemsize
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.memoryview.itemsize.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":586
*
* @property
* def nbytes(self): # <<<<<<<<<<<<<<
* return self.size * self.view.itemsize
*
*/
/* Python wrapper */
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_6nbytes_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_6nbytes_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_6nbytes___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_6nbytes___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":587
* @property
* def nbytes(self):
* return self.size * self.view.itemsize # <<<<<<<<<<<<<<
*
* @property
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 587, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = PyInt_FromSsize_t(__pyx_v_self->view.itemsize); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 587, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = PyNumber_Multiply(__pyx_t_1, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 587, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_r = __pyx_t_3;
__pyx_t_3 = 0;
goto __pyx_L0;
/* "View.MemoryView":586
*
* @property
* def nbytes(self): # <<<<<<<<<<<<<<
* return self.size * self.view.itemsize
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView.memoryview.nbytes.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
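/* nbytes is computed as size * itemsize through Python-object arithmetic
 * (PyNumber_Multiply) rather than C arithmetic, because the size property
 * is itself a cached Python integer. As a worked example, a contiguous
 * 512x512 view of doubles would report 512 * 512 * 8 = 2097152 bytes. */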
/* "View.MemoryView":590
*
* @property
* def size(self): # <<<<<<<<<<<<<<
* if self._size is None:
* result = 1
*/
/* Python wrapper */
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4size_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4size_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_4size___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4size___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
PyObject *__pyx_v_result = NULL;
PyObject *__pyx_v_length = NULL;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
Py_ssize_t *__pyx_t_3;
Py_ssize_t *__pyx_t_4;
Py_ssize_t *__pyx_t_5;
PyObject *__pyx_t_6 = NULL;
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":591
* @property
* def size(self):
* if self._size is None: # <<<<<<<<<<<<<<
* result = 1
*
*/
__pyx_t_1 = (__pyx_v_self->_size == Py_None);
__pyx_t_2 = (__pyx_t_1 != 0);
if (__pyx_t_2) {
/* "View.MemoryView":592
* def size(self):
* if self._size is None:
* result = 1 # <<<<<<<<<<<<<<
*
* for length in self.view.shape[:self.view.ndim]:
*/
__Pyx_INCREF(__pyx_int_1);
__pyx_v_result = __pyx_int_1;
/* "View.MemoryView":594
* result = 1
*
* for length in self.view.shape[:self.view.ndim]: # <<<<<<<<<<<<<<
* result *= length
*
*/
__pyx_t_4 = (__pyx_v_self->view.shape + __pyx_v_self->view.ndim);
for (__pyx_t_5 = __pyx_v_self->view.shape; __pyx_t_5 < __pyx_t_4; __pyx_t_5++) {
__pyx_t_3 = __pyx_t_5;
__pyx_t_6 = PyInt_FromSsize_t((__pyx_t_3[0])); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 594, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_XDECREF_SET(__pyx_v_length, __pyx_t_6);
__pyx_t_6 = 0;
/* "View.MemoryView":595
*
* for length in self.view.shape[:self.view.ndim]:
* result *= length # <<<<<<<<<<<<<<
*
* self._size = result
*/
__pyx_t_6 = PyNumber_InPlaceMultiply(__pyx_v_result, __pyx_v_length); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 595, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_DECREF_SET(__pyx_v_result, __pyx_t_6);
__pyx_t_6 = 0;
}
/* "View.MemoryView":597
* result *= length
*
* self._size = result # <<<<<<<<<<<<<<
*
* return self._size
*/
__Pyx_INCREF(__pyx_v_result);
__Pyx_GIVEREF(__pyx_v_result);
__Pyx_GOTREF(__pyx_v_self->_size);
__Pyx_DECREF(__pyx_v_self->_size);
__pyx_v_self->_size = __pyx_v_result;
/* "View.MemoryView":591
* @property
* def size(self):
* if self._size is None: # <<<<<<<<<<<<<<
* result = 1
*
*/
}
/* "View.MemoryView":599
* self._size = result
*
* return self._size # <<<<<<<<<<<<<<
*
* def __len__(self):
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(__pyx_v_self->_size);
__pyx_r = __pyx_v_self->_size;
goto __pyx_L0;
/* "View.MemoryView":590
*
* @property
* def size(self): # <<<<<<<<<<<<<<
* if self._size is None:
* result = 1
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_6);
__Pyx_AddTraceback("View.MemoryView.memoryview.size.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_result);
__Pyx_XDECREF(__pyx_v_length);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
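/* size.__get__ lazily computes the element count as the product of all
 * extents in self.view.shape[:ndim] (the empty product yields 1, which is
 * also the correct value for 0-dim views) and memoises the result in
 * self._size so that subsequent accesses skip the loop entirely. */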
/* "View.MemoryView":601
* return self._size
*
* def __len__(self): # <<<<<<<<<<<<<<
* if self.view.ndim >= 1:
* return self.view.shape[0]
*/
/* Python wrapper */
static Py_ssize_t __pyx_memoryview___len__(PyObject *__pyx_v_self); /*proto*/
static Py_ssize_t __pyx_memoryview___len__(PyObject *__pyx_v_self) {
Py_ssize_t __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__len__ (wrapper)", 0);
__pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_10__len__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static Py_ssize_t __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_10__len__(struct __pyx_memoryview_obj *__pyx_v_self) {
Py_ssize_t __pyx_r;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
__Pyx_RefNannySetupContext("__len__", 0);
/* "View.MemoryView":602
*
* def __len__(self):
* if self.view.ndim >= 1: # <<<<<<<<<<<<<<
* return self.view.shape[0]
*
*/
__pyx_t_1 = ((__pyx_v_self->view.ndim >= 1) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":603
* def __len__(self):
* if self.view.ndim >= 1:
* return self.view.shape[0] # <<<<<<<<<<<<<<
*
* return 0
*/
__pyx_r = (__pyx_v_self->view.shape[0]);
goto __pyx_L0;
/* "View.MemoryView":602
*
* def __len__(self):
* if self.view.ndim >= 1: # <<<<<<<<<<<<<<
* return self.view.shape[0]
*
*/
}
/* "View.MemoryView":605
* return self.view.shape[0]
*
* return 0 # <<<<<<<<<<<<<<
*
* def __repr__(self):
*/
__pyx_r = 0;
goto __pyx_L0;
/* "View.MemoryView":601
* return self._size
*
* def __len__(self): # <<<<<<<<<<<<<<
* if self.view.ndim >= 1:
* return self.view.shape[0]
*/
/* function exit code */
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":607
* return 0
*
* def __repr__(self): # <<<<<<<<<<<<<<
* return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__,
* id(self))
*/
/* Python wrapper */
static PyObject *__pyx_memoryview___repr__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_memoryview___repr__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__repr__ (wrapper)", 0);
__pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_12__repr__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_12__repr__(struct __pyx_memoryview_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
__Pyx_RefNannySetupContext("__repr__", 0);
/* "View.MemoryView":608
*
* def __repr__(self):
* return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, # <<<<<<<<<<<<<<
* id(self))
*
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_base); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 608, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_class); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 608, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 608, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
/* "View.MemoryView":609
* def __repr__(self):
* return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__,
* id(self)) # <<<<<<<<<<<<<<
*
* def __str__(self):
*/
__pyx_t_2 = __Pyx_PyObject_CallOneArg(__pyx_builtin_id, ((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 609, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
/* "View.MemoryView":608
*
* def __repr__(self):
* return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, # <<<<<<<<<<<<<<
* id(self))
*
*/
__pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 608, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_1);
__Pyx_GIVEREF(__pyx_t_2);
PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_2);
__pyx_t_1 = 0;
__pyx_t_2 = 0;
__pyx_t_2 = __Pyx_PyString_Format(__pyx_kp_s_MemoryView_of_r_at_0x_x, __pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 608, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
/* "View.MemoryView":607
* return 0
*
* def __repr__(self): # <<<<<<<<<<<<<<
* return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__,
* id(self))
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView.memoryview.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":611
* id(self))
*
* def __str__(self): # <<<<<<<<<<<<<<
* return "<MemoryView of %r object>" % (self.base.__class__.__name__,)
*
*/
/* Python wrapper */
static PyObject *__pyx_memoryview___str__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_memoryview___str__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__str__ (wrapper)", 0);
__pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_14__str__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_14__str__(struct __pyx_memoryview_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
__Pyx_RefNannySetupContext("__str__", 0);
/* "View.MemoryView":612
*
* def __str__(self):
* return "<MemoryView of %r object>" % (self.base.__class__.__name__,) # <<<<<<<<<<<<<<
*
*
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_base); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 612, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_class); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 612, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 612, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 612, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_1);
__pyx_t_1 = 0;
__pyx_t_1 = __Pyx_PyString_Format(__pyx_kp_s_MemoryView_of_r_object, __pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 612, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "View.MemoryView":611
* id(self))
*
* def __str__(self): # <<<<<<<<<<<<<<
* return "<MemoryView of %r object>" % (self.base.__class__.__name__,)
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_AddTraceback("View.MemoryView.memoryview.__str__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":615
*
*
* def is_c_contig(self): # <<<<<<<<<<<<<<
* cdef __Pyx_memviewslice *mslice
* cdef __Pyx_memviewslice tmp
*/
/* Python wrapper */
static PyObject *__pyx_memoryview_is_c_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
static PyObject *__pyx_memoryview_is_c_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("is_c_contig (wrapper)", 0);
__pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_16is_c_contig(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_16is_c_contig(struct __pyx_memoryview_obj *__pyx_v_self) {
__Pyx_memviewslice *__pyx_v_mslice;
__Pyx_memviewslice __pyx_v_tmp;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
__Pyx_RefNannySetupContext("is_c_contig", 0);
/* "View.MemoryView":618
* cdef __Pyx_memviewslice *mslice
* cdef __Pyx_memviewslice tmp
* mslice = get_slice_from_memview(self, &tmp) # <<<<<<<<<<<<<<
* return slice_is_contig(mslice[0], 'C', self.view.ndim)
*
*/
__pyx_v_mslice = __pyx_memoryview_get_slice_from_memoryview(__pyx_v_self, (&__pyx_v_tmp));
/* "View.MemoryView":619
* cdef __Pyx_memviewslice tmp
* mslice = get_slice_from_memview(self, &tmp)
* return slice_is_contig(mslice[0], 'C', self.view.ndim) # <<<<<<<<<<<<<<
*
* def is_f_contig(self):
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = __Pyx_PyBool_FromLong(__pyx_memviewslice_is_contig((__pyx_v_mslice[0]), 'C', __pyx_v_self->view.ndim)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 619, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "View.MemoryView":615
*
*
* def is_c_contig(self): # <<<<<<<<<<<<<<
* cdef __Pyx_memviewslice *mslice
* cdef __Pyx_memviewslice tmp
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.memoryview.is_c_contig", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
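/* is_c_contig and is_f_contig both fetch the canonical slice for this
 * view (get_slice_from_memview appears to fill `tmp` only for plain
 * memoryviews and to return the stored slice for _memoryviewslice
 * subclasses) and then test contiguity in 'C' (last dimension varies
 * fastest) or 'F' (first dimension varies fastest) order. */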
/* "View.MemoryView":621
* return slice_is_contig(mslice[0], 'C', self.view.ndim)
*
* def is_f_contig(self): # <<<<<<<<<<<<<<
* cdef __Pyx_memviewslice *mslice
* cdef __Pyx_memviewslice tmp
*/
/* Python wrapper */
static PyObject *__pyx_memoryview_is_f_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
static PyObject *__pyx_memoryview_is_f_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("is_f_contig (wrapper)", 0);
__pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_18is_f_contig(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_18is_f_contig(struct __pyx_memoryview_obj *__pyx_v_self) {
__Pyx_memviewslice *__pyx_v_mslice;
__Pyx_memviewslice __pyx_v_tmp;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
__Pyx_RefNannySetupContext("is_f_contig", 0);
/* "View.MemoryView":624
* cdef __Pyx_memviewslice *mslice
* cdef __Pyx_memviewslice tmp
* mslice = get_slice_from_memview(self, &tmp) # <<<<<<<<<<<<<<
* return slice_is_contig(mslice[0], 'F', self.view.ndim)
*
*/
__pyx_v_mslice = __pyx_memoryview_get_slice_from_memoryview(__pyx_v_self, (&__pyx_v_tmp));
/* "View.MemoryView":625
* cdef __Pyx_memviewslice tmp
* mslice = get_slice_from_memview(self, &tmp)
* return slice_is_contig(mslice[0], 'F', self.view.ndim) # <<<<<<<<<<<<<<
*
* def copy(self):
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = __Pyx_PyBool_FromLong(__pyx_memviewslice_is_contig((__pyx_v_mslice[0]), 'F', __pyx_v_self->view.ndim)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 625, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "View.MemoryView":621
* return slice_is_contig(mslice[0], 'C', self.view.ndim)
*
* def is_f_contig(self): # <<<<<<<<<<<<<<
* cdef __Pyx_memviewslice *mslice
* cdef __Pyx_memviewslice tmp
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.memoryview.is_f_contig", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":627
* return slice_is_contig(mslice[0], 'F', self.view.ndim)
*
* def copy(self): # <<<<<<<<<<<<<<
* cdef __Pyx_memviewslice mslice
* cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS
*/
/* Python wrapper */
static PyObject *__pyx_memoryview_copy(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
static PyObject *__pyx_memoryview_copy(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("copy (wrapper)", 0);
__pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_20copy(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_20copy(struct __pyx_memoryview_obj *__pyx_v_self) {
__Pyx_memviewslice __pyx_v_mslice;
int __pyx_v_flags;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
__Pyx_memviewslice __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
__Pyx_RefNannySetupContext("copy", 0);
/* "View.MemoryView":629
* def copy(self):
* cdef __Pyx_memviewslice mslice
* cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS # <<<<<<<<<<<<<<
*
* slice_copy(self, &mslice)
*/
__pyx_v_flags = (__pyx_v_self->flags & (~PyBUF_F_CONTIGUOUS));
/* "View.MemoryView":631
* cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS
*
* slice_copy(self, &mslice) # <<<<<<<<<<<<<<
* mslice = slice_copy_contig(&mslice, "c", self.view.ndim,
* self.view.itemsize,
*/
__pyx_memoryview_slice_copy(__pyx_v_self, (&__pyx_v_mslice));
/* "View.MemoryView":632
*
* slice_copy(self, &mslice)
* mslice = slice_copy_contig(&mslice, "c", self.view.ndim, # <<<<<<<<<<<<<<
* self.view.itemsize,
* flags|PyBUF_C_CONTIGUOUS,
*/
__pyx_t_1 = __pyx_memoryview_copy_new_contig((&__pyx_v_mslice), ((char *)"c"), __pyx_v_self->view.ndim, __pyx_v_self->view.itemsize, (__pyx_v_flags | PyBUF_C_CONTIGUOUS), __pyx_v_self->dtype_is_object); if (unlikely(PyErr_Occurred())) __PYX_ERR(1, 632, __pyx_L1_error)
__pyx_v_mslice = __pyx_t_1;
/* "View.MemoryView":637
* self.dtype_is_object)
*
* return memoryview_copy_from_slice(self, &mslice) # <<<<<<<<<<<<<<
*
* def copy_fortran(self):
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_2 = __pyx_memoryview_copy_object_from_slice(__pyx_v_self, (&__pyx_v_mslice)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 637, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
/* "View.MemoryView":627
* return slice_is_contig(mslice[0], 'F', self.view.ndim)
*
* def copy(self): # <<<<<<<<<<<<<<
* cdef __Pyx_memviewslice mslice
* cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_AddTraceback("View.MemoryView.memoryview.copy", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
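/* copy() materialises a C-contiguous duplicate: the F-contiguous bit is
 * masked out of the flags, PyBUF_C_CONTIGUOUS is forced on, and
 * copy_new_contig allocates a fresh buffer and copies the data before
 * re-wrapping it as a new memoryview. copy_fortran() below is the mirror
 * image with the 'C' and 'F' roles swapped. */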
/* "View.MemoryView":639
* return memoryview_copy_from_slice(self, &mslice)
*
* def copy_fortran(self): # <<<<<<<<<<<<<<
* cdef __Pyx_memviewslice src, dst
* cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS
*/
/* Python wrapper */
static PyObject *__pyx_memoryview_copy_fortran(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
static PyObject *__pyx_memoryview_copy_fortran(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("copy_fortran (wrapper)", 0);
__pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_22copy_fortran(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_22copy_fortran(struct __pyx_memoryview_obj *__pyx_v_self) {
__Pyx_memviewslice __pyx_v_src;
__Pyx_memviewslice __pyx_v_dst;
int __pyx_v_flags;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
__Pyx_memviewslice __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
__Pyx_RefNannySetupContext("copy_fortran", 0);
/* "View.MemoryView":641
* def copy_fortran(self):
* cdef __Pyx_memviewslice src, dst
* cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS # <<<<<<<<<<<<<<
*
* slice_copy(self, &src)
*/
__pyx_v_flags = (__pyx_v_self->flags & (~PyBUF_C_CONTIGUOUS));
/* "View.MemoryView":643
* cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS
*
* slice_copy(self, &src) # <<<<<<<<<<<<<<
* dst = slice_copy_contig(&src, "fortran", self.view.ndim,
* self.view.itemsize,
*/
__pyx_memoryview_slice_copy(__pyx_v_self, (&__pyx_v_src));
/* "View.MemoryView":644
*
* slice_copy(self, &src)
* dst = slice_copy_contig(&src, "fortran", self.view.ndim, # <<<<<<<<<<<<<<
* self.view.itemsize,
* flags|PyBUF_F_CONTIGUOUS,
*/
__pyx_t_1 = __pyx_memoryview_copy_new_contig((&__pyx_v_src), ((char *)"fortran"), __pyx_v_self->view.ndim, __pyx_v_self->view.itemsize, (__pyx_v_flags | PyBUF_F_CONTIGUOUS), __pyx_v_self->dtype_is_object); if (unlikely(PyErr_Occurred())) __PYX_ERR(1, 644, __pyx_L1_error)
__pyx_v_dst = __pyx_t_1;
/* "View.MemoryView":649
* self.dtype_is_object)
*
* return memoryview_copy_from_slice(self, &dst) # <<<<<<<<<<<<<<
*
*
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_2 = __pyx_memoryview_copy_object_from_slice(__pyx_v_self, (&__pyx_v_dst)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 649, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
/* "View.MemoryView":639
* return memoryview_copy_from_slice(self, &mslice)
*
* def copy_fortran(self): # <<<<<<<<<<<<<<
* cdef __Pyx_memviewslice src, dst
* cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_AddTraceback("View.MemoryView.memoryview.copy_fortran", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "(tree fragment)":1
* def __reduce_cython__(self): # <<<<<<<<<<<<<<
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state):
*/
/* Python wrapper */
static PyObject *__pyx_pw___pyx_memoryview_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
static PyObject *__pyx_pw___pyx_memoryview_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
__pyx_r = __pyx_pf___pyx_memoryview___reduce_cython__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf___pyx_memoryview___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
__Pyx_RefNannySetupContext("__reduce_cython__", 0);
/* "(tree fragment)":2
* def __reduce_cython__(self):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<<
* def __setstate_cython__(self, __pyx_state):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
*/
__pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__13, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 2, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_Raise(__pyx_t_1, 0, 0, 0);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__PYX_ERR(1, 2, __pyx_L1_error)
/* "(tree fragment)":1
* def __reduce_cython__(self): # <<<<<<<<<<<<<<
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state):
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.memoryview.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "(tree fragment)":3
* def __reduce_cython__(self):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<<
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
*/
/* Python wrapper */
static PyObject *__pyx_pw___pyx_memoryview_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/
static PyObject *__pyx_pw___pyx_memoryview_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
__pyx_r = __pyx_pf___pyx_memoryview_2__setstate_cython__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf___pyx_memoryview_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
__Pyx_RefNannySetupContext("__setstate_cython__", 0);
/* "(tree fragment)":4
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<<
*/
__pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__14, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 4, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_Raise(__pyx_t_1, 0, 0, 0);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__PYX_ERR(1, 4, __pyx_L1_error)
/* "(tree fragment)":3
* def __reduce_cython__(self):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<<
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.memoryview.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":653
*
* @cname('__pyx_memoryview_new')
* cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): # <<<<<<<<<<<<<<
* cdef memoryview result = memoryview(o, flags, dtype_is_object)
* result.typeinfo = typeinfo
*/
static PyObject *__pyx_memoryview_new(PyObject *__pyx_v_o, int __pyx_v_flags, int __pyx_v_dtype_is_object, __Pyx_TypeInfo *__pyx_v_typeinfo) {
struct __pyx_memoryview_obj *__pyx_v_result = 0;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
__Pyx_RefNannySetupContext("memoryview_cwrapper", 0);
/* "View.MemoryView":654
* @cname('__pyx_memoryview_new')
* cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo):
* cdef memoryview result = memoryview(o, flags, dtype_is_object) # <<<<<<<<<<<<<<
* result.typeinfo = typeinfo
* return result
*/
__pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_flags); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 654, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_v_dtype_is_object); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 654, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 654, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_INCREF(__pyx_v_o);
__Pyx_GIVEREF(__pyx_v_o);
PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_o);
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_1);
__Pyx_GIVEREF(__pyx_t_2);
PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2);
__pyx_t_1 = 0;
__pyx_t_2 = 0;
__pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryview_type), __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 654, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_v_result = ((struct __pyx_memoryview_obj *)__pyx_t_2);
__pyx_t_2 = 0;
/* "View.MemoryView":655
* cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo):
* cdef memoryview result = memoryview(o, flags, dtype_is_object)
* result.typeinfo = typeinfo # <<<<<<<<<<<<<<
* return result
*
*/
__pyx_v_result->typeinfo = __pyx_v_typeinfo;
/* "View.MemoryView":656
* cdef memoryview result = memoryview(o, flags, dtype_is_object)
* result.typeinfo = typeinfo
* return result # <<<<<<<<<<<<<<
*
* @cname('__pyx_memoryview_check')
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(((PyObject *)__pyx_v_result));
__pyx_r = ((PyObject *)__pyx_v_result);
goto __pyx_L0;
/* "View.MemoryView":653
*
* @cname('__pyx_memoryview_new')
* cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): # <<<<<<<<<<<<<<
* cdef memoryview result = memoryview(o, flags, dtype_is_object)
* result.typeinfo = typeinfo
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView.memoryview_cwrapper", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XDECREF((PyObject *)__pyx_v_result);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
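/* Editorial sketch (assumption, not part of the generated module):
 * __pyx_memoryview_new is the C entry point other generated code uses to
 * wrap a buffer-exporting object; it is equivalent to the Cython call
 * `memoryview(o, flags, dtype_is_object)` followed by
 * `result.typeinfo = typeinfo`. A caller might use it like this: */
#if 0   /* illustrative only; `exporter` and `my_typeinfo` are hypothetical */
PyObject *mv = __pyx_memoryview_new(exporter,      /* any buffer-protocol object             */
                                    PyBUF_FULL_RO, /* buffer-request flags                   */
                                    0,             /* dtype_is_object: plain C element type  */
                                    my_typeinfo);  /* __Pyx_TypeInfo describing the dtype    */
if (unlikely(!mv)) { /* propagate the Python exception */ }
#endif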
/* "View.MemoryView":659
*
* @cname('__pyx_memoryview_check')
* cdef inline bint memoryview_check(object o): # <<<<<<<<<<<<<<
* return isinstance(o, memoryview)
*
*/
static CYTHON_INLINE int __pyx_memoryview_check(PyObject *__pyx_v_o) {
int __pyx_r;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
__Pyx_RefNannySetupContext("memoryview_check", 0);
/* "View.MemoryView":660
* @cname('__pyx_memoryview_check')
* cdef inline bint memoryview_check(object o):
* return isinstance(o, memoryview) # <<<<<<<<<<<<<<
*
* cdef tuple _unellipsify(object index, int ndim):
*/
__pyx_t_1 = __Pyx_TypeCheck(__pyx_v_o, __pyx_memoryview_type);
__pyx_r = __pyx_t_1;
goto __pyx_L0;
/* "View.MemoryView":659
*
* @cname('__pyx_memoryview_check')
* cdef inline bint memoryview_check(object o): # <<<<<<<<<<<<<<
* return isinstance(o, memoryview)
*
*/
/* function exit code */
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
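/* Editorial note (assumption): memoryview_check is the inlined form of
 * `isinstance(o, memoryview)`; __Pyx_TypeCheck is a subtype check, so
 * instances of the _memoryviewslice subclass (produced by slicing) pass
 * as well. */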
/* "View.MemoryView":662
* return isinstance(o, memoryview)
*
* cdef tuple _unellipsify(object index, int ndim): # <<<<<<<<<<<<<<
* """
* Replace all ellipses with full slices and fill incomplete indices with
*/
static PyObject *_unellipsify(PyObject *__pyx_v_index, int __pyx_v_ndim) {
PyObject *__pyx_v_tup = NULL;
PyObject *__pyx_v_result = NULL;
int __pyx_v_have_slices;
int __pyx_v_seen_ellipsis;
CYTHON_UNUSED PyObject *__pyx_v_idx = NULL;
PyObject *__pyx_v_item = NULL;
Py_ssize_t __pyx_v_nslices;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
Py_ssize_t __pyx_t_5;
PyObject *(*__pyx_t_6)(PyObject *);
PyObject *__pyx_t_7 = NULL;
Py_ssize_t __pyx_t_8;
int __pyx_t_9;
int __pyx_t_10;
PyObject *__pyx_t_11 = NULL;
__Pyx_RefNannySetupContext("_unellipsify", 0);
/* "View.MemoryView":667
* full slices.
* """
* if not isinstance(index, tuple): # <<<<<<<<<<<<<<
* tup = (index,)
* else:
*/
__pyx_t_1 = PyTuple_Check(__pyx_v_index);
__pyx_t_2 = ((!(__pyx_t_1 != 0)) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":668
* """
* if not isinstance(index, tuple):
* tup = (index,) # <<<<<<<<<<<<<<
* else:
* tup = index
*/
__pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 668, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_INCREF(__pyx_v_index);
__Pyx_GIVEREF(__pyx_v_index);
PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_index);
__pyx_v_tup = __pyx_t_3;
__pyx_t_3 = 0;
/* "View.MemoryView":667
* full slices.
* """
* if not isinstance(index, tuple): # <<<<<<<<<<<<<<
* tup = (index,)
* else:
*/
goto __pyx_L3;
}
/* "View.MemoryView":670
* tup = (index,)
* else:
* tup = index # <<<<<<<<<<<<<<
*
* result = []
*/
/*else*/ {
__Pyx_INCREF(__pyx_v_index);
__pyx_v_tup = __pyx_v_index;
}
__pyx_L3:;
/* "View.MemoryView":672
* tup = index
*
* result = [] # <<<<<<<<<<<<<<
* have_slices = False
* seen_ellipsis = False
*/
__pyx_t_3 = PyList_New(0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 672, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_v_result = ((PyObject*)__pyx_t_3);
__pyx_t_3 = 0;
/* "View.MemoryView":673
*
* result = []
* have_slices = False # <<<<<<<<<<<<<<
* seen_ellipsis = False
* for idx, item in enumerate(tup):
*/
__pyx_v_have_slices = 0;
/* "View.MemoryView":674
* result = []
* have_slices = False
* seen_ellipsis = False # <<<<<<<<<<<<<<
* for idx, item in enumerate(tup):
* if item is Ellipsis:
*/
__pyx_v_seen_ellipsis = 0;
/* "View.MemoryView":675
* have_slices = False
* seen_ellipsis = False
* for idx, item in enumerate(tup): # <<<<<<<<<<<<<<
* if item is Ellipsis:
* if not seen_ellipsis:
*/
__Pyx_INCREF(__pyx_int_0);
__pyx_t_3 = __pyx_int_0;
if (likely(PyList_CheckExact(__pyx_v_tup)) || PyTuple_CheckExact(__pyx_v_tup)) {
__pyx_t_4 = __pyx_v_tup; __Pyx_INCREF(__pyx_t_4); __pyx_t_5 = 0;
__pyx_t_6 = NULL;
} else {
__pyx_t_5 = -1; __pyx_t_4 = PyObject_GetIter(__pyx_v_tup); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 675, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_6 = Py_TYPE(__pyx_t_4)->tp_iternext; if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 675, __pyx_L1_error)
}
for (;;) {
if (likely(!__pyx_t_6)) {
if (likely(PyList_CheckExact(__pyx_t_4))) {
if (__pyx_t_5 >= PyList_GET_SIZE(__pyx_t_4)) break;
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
__pyx_t_7 = PyList_GET_ITEM(__pyx_t_4, __pyx_t_5); __Pyx_INCREF(__pyx_t_7); __pyx_t_5++; if (unlikely(0 < 0)) __PYX_ERR(1, 675, __pyx_L1_error)
#else
__pyx_t_7 = PySequence_ITEM(__pyx_t_4, __pyx_t_5); __pyx_t_5++; if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 675, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_7);
#endif
} else {
if (__pyx_t_5 >= PyTuple_GET_SIZE(__pyx_t_4)) break;
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
__pyx_t_7 = PyTuple_GET_ITEM(__pyx_t_4, __pyx_t_5); __Pyx_INCREF(__pyx_t_7); __pyx_t_5++; if (unlikely(0 < 0)) __PYX_ERR(1, 675, __pyx_L1_error)
#else
__pyx_t_7 = PySequence_ITEM(__pyx_t_4, __pyx_t_5); __pyx_t_5++; if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 675, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_7);
#endif
}
} else {
__pyx_t_7 = __pyx_t_6(__pyx_t_4);
if (unlikely(!__pyx_t_7)) {
PyObject* exc_type = PyErr_Occurred();
if (exc_type) {
if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear();
else __PYX_ERR(1, 675, __pyx_L1_error)
}
break;
}
__Pyx_GOTREF(__pyx_t_7);
}
__Pyx_XDECREF_SET(__pyx_v_item, __pyx_t_7);
__pyx_t_7 = 0;
__Pyx_INCREF(__pyx_t_3);
__Pyx_XDECREF_SET(__pyx_v_idx, __pyx_t_3);
__pyx_t_7 = __Pyx_PyInt_AddObjC(__pyx_t_3, __pyx_int_1, 1, 0, 0); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 675, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_7);
__Pyx_DECREF(__pyx_t_3);
__pyx_t_3 = __pyx_t_7;
__pyx_t_7 = 0;
/* "View.MemoryView":676
* seen_ellipsis = False
* for idx, item in enumerate(tup):
* if item is Ellipsis: # <<<<<<<<<<<<<<
* if not seen_ellipsis:
* result.extend([slice(None)] * (ndim - len(tup) + 1))
*/
__pyx_t_2 = (__pyx_v_item == __pyx_builtin_Ellipsis);
__pyx_t_1 = (__pyx_t_2 != 0);
if (__pyx_t_1) {
/* "View.MemoryView":677
* for idx, item in enumerate(tup):
* if item is Ellipsis:
* if not seen_ellipsis: # <<<<<<<<<<<<<<
* result.extend([slice(None)] * (ndim - len(tup) + 1))
* seen_ellipsis = True
*/
__pyx_t_1 = ((!(__pyx_v_seen_ellipsis != 0)) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":678
* if item is Ellipsis:
* if not seen_ellipsis:
* result.extend([slice(None)] * (ndim - len(tup) + 1)) # <<<<<<<<<<<<<<
* seen_ellipsis = True
* else:
*/
__pyx_t_8 = PyObject_Length(__pyx_v_tup); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(1, 678, __pyx_L1_error)
__pyx_t_7 = PyList_New(1 * ((((__pyx_v_ndim - __pyx_t_8) + 1)<0) ? 0:((__pyx_v_ndim - __pyx_t_8) + 1))); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 678, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_7);
{ Py_ssize_t __pyx_temp;
for (__pyx_temp=0; __pyx_temp < ((__pyx_v_ndim - __pyx_t_8) + 1); __pyx_temp++) {
__Pyx_INCREF(__pyx_slice__15);
__Pyx_GIVEREF(__pyx_slice__15);
PyList_SET_ITEM(__pyx_t_7, __pyx_temp, __pyx_slice__15);
}
}
__pyx_t_9 = __Pyx_PyList_Extend(__pyx_v_result, __pyx_t_7); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(1, 678, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
/* "View.MemoryView":679
* if not seen_ellipsis:
* result.extend([slice(None)] * (ndim - len(tup) + 1))
* seen_ellipsis = True # <<<<<<<<<<<<<<
* else:
* result.append(slice(None))
*/
__pyx_v_seen_ellipsis = 1;
/* "View.MemoryView":677
* for idx, item in enumerate(tup):
* if item is Ellipsis:
* if not seen_ellipsis: # <<<<<<<<<<<<<<
* result.extend([slice(None)] * (ndim - len(tup) + 1))
* seen_ellipsis = True
*/
goto __pyx_L7;
}
/* "View.MemoryView":681
* seen_ellipsis = True
* else:
* result.append(slice(None)) # <<<<<<<<<<<<<<
* have_slices = True
* else:
*/
/*else*/ {
__pyx_t_9 = __Pyx_PyList_Append(__pyx_v_result, __pyx_slice__15); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(1, 681, __pyx_L1_error)
}
__pyx_L7:;
/* "View.MemoryView":682
* else:
* result.append(slice(None))
* have_slices = True # <<<<<<<<<<<<<<
* else:
* if not isinstance(item, slice) and not PyIndex_Check(item):
*/
__pyx_v_have_slices = 1;
/* "View.MemoryView":676
* seen_ellipsis = False
* for idx, item in enumerate(tup):
* if item is Ellipsis: # <<<<<<<<<<<<<<
* if not seen_ellipsis:
* result.extend([slice(None)] * (ndim - len(tup) + 1))
*/
goto __pyx_L6;
}
/* "View.MemoryView":684
* have_slices = True
* else:
* if not isinstance(item, slice) and not PyIndex_Check(item): # <<<<<<<<<<<<<<
* raise TypeError("Cannot index with type '%s'" % type(item))
*
*/
/*else*/ {
__pyx_t_2 = PySlice_Check(__pyx_v_item);
__pyx_t_10 = ((!(__pyx_t_2 != 0)) != 0);
if (__pyx_t_10) {
} else {
__pyx_t_1 = __pyx_t_10;
goto __pyx_L9_bool_binop_done;
}
__pyx_t_10 = ((!(PyIndex_Check(__pyx_v_item) != 0)) != 0);
__pyx_t_1 = __pyx_t_10;
__pyx_L9_bool_binop_done:;
if (unlikely(__pyx_t_1)) {
/* "View.MemoryView":685
* else:
* if not isinstance(item, slice) and not PyIndex_Check(item):
* raise TypeError("Cannot index with type '%s'" % type(item)) # <<<<<<<<<<<<<<
*
* have_slices = have_slices or isinstance(item, slice)
*/
__pyx_t_7 = __Pyx_PyString_FormatSafe(__pyx_kp_s_Cannot_index_with_type_s, ((PyObject *)Py_TYPE(__pyx_v_item))); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 685, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_7);
__pyx_t_11 = __Pyx_PyObject_CallOneArg(__pyx_builtin_TypeError, __pyx_t_7); if (unlikely(!__pyx_t_11)) __PYX_ERR(1, 685, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_11);
__Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
__Pyx_Raise(__pyx_t_11, 0, 0, 0);
__Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
__PYX_ERR(1, 685, __pyx_L1_error)
/* "View.MemoryView":684
* have_slices = True
* else:
* if not isinstance(item, slice) and not PyIndex_Check(item): # <<<<<<<<<<<<<<
* raise TypeError("Cannot index with type '%s'" % type(item))
*
*/
}
/* "View.MemoryView":687
* raise TypeError("Cannot index with type '%s'" % type(item))
*
* have_slices = have_slices or isinstance(item, slice) # <<<<<<<<<<<<<<
* result.append(item)
*
*/
__pyx_t_10 = (__pyx_v_have_slices != 0);
if (!__pyx_t_10) {
} else {
__pyx_t_1 = __pyx_t_10;
goto __pyx_L11_bool_binop_done;
}
__pyx_t_10 = PySlice_Check(__pyx_v_item);
__pyx_t_2 = (__pyx_t_10 != 0);
__pyx_t_1 = __pyx_t_2;
__pyx_L11_bool_binop_done:;
__pyx_v_have_slices = __pyx_t_1;
/* "View.MemoryView":688
*
* have_slices = have_slices or isinstance(item, slice)
* result.append(item) # <<<<<<<<<<<<<<
*
* nslices = ndim - len(result)
*/
__pyx_t_9 = __Pyx_PyList_Append(__pyx_v_result, __pyx_v_item); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(1, 688, __pyx_L1_error)
}
__pyx_L6:;
/* "View.MemoryView":675
* have_slices = False
* seen_ellipsis = False
* for idx, item in enumerate(tup): # <<<<<<<<<<<<<<
* if item is Ellipsis:
* if not seen_ellipsis:
*/
}
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
/* "View.MemoryView":690
* result.append(item)
*
* nslices = ndim - len(result) # <<<<<<<<<<<<<<
* if nslices:
* result.extend([slice(None)] * nslices)
*/
__pyx_t_5 = PyList_GET_SIZE(__pyx_v_result); if (unlikely(__pyx_t_5 == ((Py_ssize_t)-1))) __PYX_ERR(1, 690, __pyx_L1_error)
__pyx_v_nslices = (__pyx_v_ndim - __pyx_t_5);
/* "View.MemoryView":691
*
* nslices = ndim - len(result)
* if nslices: # <<<<<<<<<<<<<<
* result.extend([slice(None)] * nslices)
*
*/
__pyx_t_1 = (__pyx_v_nslices != 0);
if (__pyx_t_1) {
/* "View.MemoryView":692
* nslices = ndim - len(result)
* if nslices:
* result.extend([slice(None)] * nslices) # <<<<<<<<<<<<<<
*
* return have_slices or nslices, tuple(result)
*/
__pyx_t_3 = PyList_New(1 * ((__pyx_v_nslices<0) ? 0:__pyx_v_nslices)); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 692, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
{ Py_ssize_t __pyx_temp;
for (__pyx_temp=0; __pyx_temp < __pyx_v_nslices; __pyx_temp++) {
__Pyx_INCREF(__pyx_slice__15);
__Pyx_GIVEREF(__pyx_slice__15);
PyList_SET_ITEM(__pyx_t_3, __pyx_temp, __pyx_slice__15);
}
}
__pyx_t_9 = __Pyx_PyList_Extend(__pyx_v_result, __pyx_t_3); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(1, 692, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
/* "View.MemoryView":691
*
* nslices = ndim - len(result)
* if nslices: # <<<<<<<<<<<<<<
* result.extend([slice(None)] * nslices)
*
*/
}
/* "View.MemoryView":694
* result.extend([slice(None)] * nslices)
*
* return have_slices or nslices, tuple(result) # <<<<<<<<<<<<<<
*
* cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim):
*/
__Pyx_XDECREF(__pyx_r);
if (!__pyx_v_have_slices) {
} else {
__pyx_t_4 = __Pyx_PyBool_FromLong(__pyx_v_have_slices); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 694, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_3 = __pyx_t_4;
__pyx_t_4 = 0;
goto __pyx_L14_bool_binop_done;
}
__pyx_t_4 = PyInt_FromSsize_t(__pyx_v_nslices); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 694, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_3 = __pyx_t_4;
__pyx_t_4 = 0;
__pyx_L14_bool_binop_done:;
__pyx_t_4 = PyList_AsTuple(__pyx_v_result); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 694, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_11 = PyTuple_New(2); if (unlikely(!__pyx_t_11)) __PYX_ERR(1, 694, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_11);
__Pyx_GIVEREF(__pyx_t_3);
PyTuple_SET_ITEM(__pyx_t_11, 0, __pyx_t_3);
__Pyx_GIVEREF(__pyx_t_4);
PyTuple_SET_ITEM(__pyx_t_11, 1, __pyx_t_4);
__pyx_t_3 = 0;
__pyx_t_4 = 0;
__pyx_r = ((PyObject*)__pyx_t_11);
__pyx_t_11 = 0;
goto __pyx_L0;
/* "View.MemoryView":662
* return isinstance(o, memoryview)
*
* cdef tuple _unellipsify(object index, int ndim): # <<<<<<<<<<<<<<
* """
* Replace all ellipses with full slices and fill incomplete indices with
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_XDECREF(__pyx_t_7);
__Pyx_XDECREF(__pyx_t_11);
__Pyx_AddTraceback("View.MemoryView._unellipsify", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_tup);
__Pyx_XDECREF(__pyx_v_result);
__Pyx_XDECREF(__pyx_v_idx);
__Pyx_XDECREF(__pyx_v_item);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
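/* Editorial worked example (traced through the code above, assuming
 * ndim == 3):
 *
 *     _unellipsify((Ellipsis, 0), 3)
 *         # Ellipsis not yet seen: extend with ndim - len(tup) + 1 == 2
 *         # full slices, then append the integer 0; nslices == 0
 *         == (True, (slice(None), slice(None), 0))
 *
 *     _unellipsify(1, 3)      # non-tuple index is first wrapped as (1,)
 *         == (2, (1, slice(None), slice(None)))
 *         # have_slices stays False, but nslices == 2 is returned in its
 *         # place and is truthy, so callers still see that padding
 *         # slices were appended
 */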
/* "View.MemoryView":696
* return have_slices or nslices, tuple(result)
*
* cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): # <<<<<<<<<<<<<<
* for suboffset in suboffsets[:ndim]:
* if suboffset >= 0:
*/
static PyObject *assert_direct_dimensions(Py_ssize_t *__pyx_v_suboffsets, int __pyx_v_ndim) {
Py_ssize_t __pyx_v_suboffset;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
Py_ssize_t *__pyx_t_1;
Py_ssize_t *__pyx_t_2;
Py_ssize_t *__pyx_t_3;
int __pyx_t_4;
PyObject *__pyx_t_5 = NULL;
__Pyx_RefNannySetupContext("assert_direct_dimensions", 0);
/* "View.MemoryView":697
*
* cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim):
* for suboffset in suboffsets[:ndim]: # <<<<<<<<<<<<<<
* if suboffset >= 0:
* raise ValueError("Indirect dimensions not supported")
*/
__pyx_t_2 = (__pyx_v_suboffsets + __pyx_v_ndim);
for (__pyx_t_3 = __pyx_v_suboffsets; __pyx_t_3 < __pyx_t_2; __pyx_t_3++) {
__pyx_t_1 = __pyx_t_3;
__pyx_v_suboffset = (__pyx_t_1[0]);
/* "View.MemoryView":698
* cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim):
* for suboffset in suboffsets[:ndim]:
* if suboffset >= 0: # <<<<<<<<<<<<<<
* raise ValueError("Indirect dimensions not supported")
*
*/
__pyx_t_4 = ((__pyx_v_suboffset >= 0) != 0);
if (unlikely(__pyx_t_4)) {
/* "View.MemoryView":699
* for suboffset in suboffsets[:ndim]:
* if suboffset >= 0:
* raise ValueError("Indirect dimensions not supported") # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_5 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__16, NULL); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 699, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_Raise(__pyx_t_5, 0, 0, 0);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__PYX_ERR(1, 699, __pyx_L1_error)
/* "View.MemoryView":698
* cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim):
* for suboffset in suboffsets[:ndim]:
* if suboffset >= 0: # <<<<<<<<<<<<<<
* raise ValueError("Indirect dimensions not supported")
*
*/
}
}
/* "View.MemoryView":696
* return have_slices or nslices, tuple(result)
*
* cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): # <<<<<<<<<<<<<<
* for suboffset in suboffsets[:ndim]:
* if suboffset >= 0:
*/
/* function exit code */
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_5);
__Pyx_AddTraceback("View.MemoryView.assert_direct_dimensions", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
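/* Editorial sketch (assumption): a suboffset of -1 marks a direct
 * dimension, so an all-negative suboffsets array passes, while any
 * non-negative entry triggers the ValueError above. */
#if 0   /* illustrative only */
Py_ssize_t direct[3]   = {-1, -1, -1};   /* every dimension direct     */
Py_ssize_t indirect[3] = {-1,  8, -1};   /* dimension 1 is indirect    */
assert_direct_dimensions(direct, 3);     /* returns Py_None            */
assert_direct_dimensions(indirect, 3);   /* returns NULL, ValueError
                                            "Indirect dimensions not
                                            supported" set             */
#endif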
/* "View.MemoryView":706
*
* @cname('__pyx_memview_slice')
* cdef memoryview memview_slice(memoryview memview, object indices): # <<<<<<<<<<<<<<
* cdef int new_ndim = 0, suboffset_dim = -1, dim
* cdef bint negative_step
*/
static struct __pyx_memoryview_obj *__pyx_memview_slice(struct __pyx_memoryview_obj *__pyx_v_memview, PyObject *__pyx_v_indices) {
int __pyx_v_new_ndim;
int __pyx_v_suboffset_dim;
int __pyx_v_dim;
__Pyx_memviewslice __pyx_v_src;
__Pyx_memviewslice __pyx_v_dst;
__Pyx_memviewslice *__pyx_v_p_src;
struct __pyx_memoryviewslice_obj *__pyx_v_memviewsliceobj = 0;
__Pyx_memviewslice *__pyx_v_p_dst;
int *__pyx_v_p_suboffset_dim;
Py_ssize_t __pyx_v_start;
Py_ssize_t __pyx_v_stop;
Py_ssize_t __pyx_v_step;
int __pyx_v_have_start;
int __pyx_v_have_stop;
int __pyx_v_have_step;
PyObject *__pyx_v_index = NULL;
struct __pyx_memoryview_obj *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
struct __pyx_memoryview_obj *__pyx_t_4;
char *__pyx_t_5;
int __pyx_t_6;
Py_ssize_t __pyx_t_7;
PyObject *(*__pyx_t_8)(PyObject *);
PyObject *__pyx_t_9 = NULL;
Py_ssize_t __pyx_t_10;
int __pyx_t_11;
Py_ssize_t __pyx_t_12;
__Pyx_RefNannySetupContext("memview_slice", 0);
/* "View.MemoryView":707
* @cname('__pyx_memview_slice')
* cdef memoryview memview_slice(memoryview memview, object indices):
* cdef int new_ndim = 0, suboffset_dim = -1, dim # <<<<<<<<<<<<<<
* cdef bint negative_step
* cdef __Pyx_memviewslice src, dst
*/
__pyx_v_new_ndim = 0;
__pyx_v_suboffset_dim = -1;
/* "View.MemoryView":714
*
*
* memset(&dst, 0, sizeof(dst)) # <<<<<<<<<<<<<<
*
* cdef _memoryviewslice memviewsliceobj
*/
(void)(memset((&__pyx_v_dst), 0, (sizeof(__pyx_v_dst))));
/* "View.MemoryView":718
* cdef _memoryviewslice memviewsliceobj
*
* assert memview.view.ndim > 0 # <<<<<<<<<<<<<<
*
* if isinstance(memview, _memoryviewslice):
*/
#ifndef CYTHON_WITHOUT_ASSERTIONS
if (unlikely(!Py_OptimizeFlag)) {
if (unlikely(!((__pyx_v_memview->view.ndim > 0) != 0))) {
PyErr_SetNone(PyExc_AssertionError);
__PYX_ERR(1, 718, __pyx_L1_error)
}
}
#endif
/* "View.MemoryView":720
* assert memview.view.ndim > 0
*
* if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<<
* memviewsliceobj = memview
* p_src = &memviewsliceobj.from_slice
*/
__pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type);
__pyx_t_2 = (__pyx_t_1 != 0);
if (__pyx_t_2) {
/* "View.MemoryView":721
*
* if isinstance(memview, _memoryviewslice):
* memviewsliceobj = memview # <<<<<<<<<<<<<<
* p_src = &memviewsliceobj.from_slice
* else:
*/
if (!(likely(((((PyObject *)__pyx_v_memview)) == Py_None) || likely(__Pyx_TypeTest(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type))))) __PYX_ERR(1, 721, __pyx_L1_error)
__pyx_t_3 = ((PyObject *)__pyx_v_memview);
__Pyx_INCREF(__pyx_t_3);
__pyx_v_memviewsliceobj = ((struct __pyx_memoryviewslice_obj *)__pyx_t_3);
__pyx_t_3 = 0;
/* "View.MemoryView":722
* if isinstance(memview, _memoryviewslice):
* memviewsliceobj = memview
* p_src = &memviewsliceobj.from_slice # <<<<<<<<<<<<<<
* else:
* slice_copy(memview, &src)
*/
__pyx_v_p_src = (&__pyx_v_memviewsliceobj->from_slice);
/* "View.MemoryView":720
* assert memview.view.ndim > 0
*
* if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<<
* memviewsliceobj = memview
* p_src = &memviewsliceobj.from_slice
*/
goto __pyx_L3;
}
/* "View.MemoryView":724
* p_src = &memviewsliceobj.from_slice
* else:
* slice_copy(memview, &src) # <<<<<<<<<<<<<<
* p_src = &src
*
*/
/*else*/ {
__pyx_memoryview_slice_copy(__pyx_v_memview, (&__pyx_v_src));
/* "View.MemoryView":725
* else:
* slice_copy(memview, &src)
* p_src = &src # <<<<<<<<<<<<<<
*
*
*/
__pyx_v_p_src = (&__pyx_v_src);
}
__pyx_L3:;
/* "View.MemoryView":731
*
*
* dst.memview = p_src.memview # <<<<<<<<<<<<<<
* dst.data = p_src.data
*
*/
__pyx_t_4 = __pyx_v_p_src->memview;
__pyx_v_dst.memview = __pyx_t_4;
/* "View.MemoryView":732
*
* dst.memview = p_src.memview
* dst.data = p_src.data # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_5 = __pyx_v_p_src->data;
__pyx_v_dst.data = __pyx_t_5;
/* "View.MemoryView":737
*
*
* cdef __Pyx_memviewslice *p_dst = &dst # <<<<<<<<<<<<<<
* cdef int *p_suboffset_dim = &suboffset_dim
* cdef Py_ssize_t start, stop, step
*/
__pyx_v_p_dst = (&__pyx_v_dst);
/* "View.MemoryView":738
*
* cdef __Pyx_memviewslice *p_dst = &dst
* cdef int *p_suboffset_dim = &suboffset_dim # <<<<<<<<<<<<<<
* cdef Py_ssize_t start, stop, step
* cdef bint have_start, have_stop, have_step
*/
__pyx_v_p_suboffset_dim = (&__pyx_v_suboffset_dim);
/* "View.MemoryView":742
* cdef bint have_start, have_stop, have_step
*
* for dim, index in enumerate(indices): # <<<<<<<<<<<<<<
* if PyIndex_Check(index):
* slice_memviewslice(
*/
__pyx_t_6 = 0;
if (likely(PyList_CheckExact(__pyx_v_indices)) || PyTuple_CheckExact(__pyx_v_indices)) {
__pyx_t_3 = __pyx_v_indices; __Pyx_INCREF(__pyx_t_3); __pyx_t_7 = 0;
__pyx_t_8 = NULL;
} else {
__pyx_t_7 = -1; __pyx_t_3 = PyObject_GetIter(__pyx_v_indices); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 742, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_8 = Py_TYPE(__pyx_t_3)->tp_iternext; if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 742, __pyx_L1_error)
}
for (;;) {
if (likely(!__pyx_t_8)) {
if (likely(PyList_CheckExact(__pyx_t_3))) {
if (__pyx_t_7 >= PyList_GET_SIZE(__pyx_t_3)) break;
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
__pyx_t_9 = PyList_GET_ITEM(__pyx_t_3, __pyx_t_7); __Pyx_INCREF(__pyx_t_9); __pyx_t_7++; if (unlikely(0 < 0)) __PYX_ERR(1, 742, __pyx_L1_error)
#else
__pyx_t_9 = PySequence_ITEM(__pyx_t_3, __pyx_t_7); __pyx_t_7++; if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 742, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
#endif
} else {
if (__pyx_t_7 >= PyTuple_GET_SIZE(__pyx_t_3)) break;
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
__pyx_t_9 = PyTuple_GET_ITEM(__pyx_t_3, __pyx_t_7); __Pyx_INCREF(__pyx_t_9); __pyx_t_7++; if (unlikely(0 < 0)) __PYX_ERR(1, 742, __pyx_L1_error)
#else
__pyx_t_9 = PySequence_ITEM(__pyx_t_3, __pyx_t_7); __pyx_t_7++; if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 742, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
#endif
}
} else {
__pyx_t_9 = __pyx_t_8(__pyx_t_3);
if (unlikely(!__pyx_t_9)) {
PyObject* exc_type = PyErr_Occurred();
if (exc_type) {
if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear();
else __PYX_ERR(1, 742, __pyx_L1_error)
}
break;
}
__Pyx_GOTREF(__pyx_t_9);
}
__Pyx_XDECREF_SET(__pyx_v_index, __pyx_t_9);
__pyx_t_9 = 0;
__pyx_v_dim = __pyx_t_6;
__pyx_t_6 = (__pyx_t_6 + 1);
/* "View.MemoryView":743
*
* for dim, index in enumerate(indices):
* if PyIndex_Check(index): # <<<<<<<<<<<<<<
* slice_memviewslice(
* p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim],
*/
__pyx_t_2 = (PyIndex_Check(__pyx_v_index) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":747
* p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim],
* dim, new_ndim, p_suboffset_dim,
* index, 0, 0, # start, stop, step # <<<<<<<<<<<<<<
* 0, 0, 0, # have_{start,stop,step}
* False)
*/
__pyx_t_10 = __Pyx_PyIndex_AsSsize_t(__pyx_v_index); if (unlikely((__pyx_t_10 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 747, __pyx_L1_error)
/* "View.MemoryView":744
* for dim, index in enumerate(indices):
* if PyIndex_Check(index):
* slice_memviewslice( # <<<<<<<<<<<<<<
* p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim],
* dim, new_ndim, p_suboffset_dim,
*/
__pyx_t_11 = __pyx_memoryview_slice_memviewslice(__pyx_v_p_dst, (__pyx_v_p_src->shape[__pyx_v_dim]), (__pyx_v_p_src->strides[__pyx_v_dim]), (__pyx_v_p_src->suboffsets[__pyx_v_dim]), __pyx_v_dim, __pyx_v_new_ndim, __pyx_v_p_suboffset_dim, __pyx_t_10, 0, 0, 0, 0, 0, 0); if (unlikely(__pyx_t_11 == ((int)-1))) __PYX_ERR(1, 744, __pyx_L1_error)
/* "View.MemoryView":743
*
* for dim, index in enumerate(indices):
* if PyIndex_Check(index): # <<<<<<<<<<<<<<
* slice_memviewslice(
* p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim],
*/
goto __pyx_L6;
}
/* "View.MemoryView":750
* 0, 0, 0, # have_{start,stop,step}
* False)
* elif index is None: # <<<<<<<<<<<<<<
* p_dst.shape[new_ndim] = 1
* p_dst.strides[new_ndim] = 0
*/
__pyx_t_2 = (__pyx_v_index == Py_None);
__pyx_t_1 = (__pyx_t_2 != 0);
if (__pyx_t_1) {
/* "View.MemoryView":751
* False)
* elif index is None:
* p_dst.shape[new_ndim] = 1 # <<<<<<<<<<<<<<
* p_dst.strides[new_ndim] = 0
* p_dst.suboffsets[new_ndim] = -1
*/
(__pyx_v_p_dst->shape[__pyx_v_new_ndim]) = 1;
/* "View.MemoryView":752
* elif index is None:
* p_dst.shape[new_ndim] = 1
* p_dst.strides[new_ndim] = 0 # <<<<<<<<<<<<<<
* p_dst.suboffsets[new_ndim] = -1
* new_ndim += 1
*/
(__pyx_v_p_dst->strides[__pyx_v_new_ndim]) = 0;
/* "View.MemoryView":753
* p_dst.shape[new_ndim] = 1
* p_dst.strides[new_ndim] = 0
* p_dst.suboffsets[new_ndim] = -1 # <<<<<<<<<<<<<<
* new_ndim += 1
* else:
*/
(__pyx_v_p_dst->suboffsets[__pyx_v_new_ndim]) = -1L;
/* "View.MemoryView":754
* p_dst.strides[new_ndim] = 0
* p_dst.suboffsets[new_ndim] = -1
* new_ndim += 1 # <<<<<<<<<<<<<<
* else:
* start = index.start or 0
*/
__pyx_v_new_ndim = (__pyx_v_new_ndim + 1);
/* "View.MemoryView":750
* 0, 0, 0, # have_{start,stop,step}
* False)
* elif index is None: # <<<<<<<<<<<<<<
* p_dst.shape[new_ndim] = 1
* p_dst.strides[new_ndim] = 0
*/
goto __pyx_L6;
}
/* "View.MemoryView":756
* new_ndim += 1
* else:
* start = index.start or 0 # <<<<<<<<<<<<<<
* stop = index.stop or 0
* step = index.step or 0
*/
/*else*/ {
__pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_start); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 756, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 756, __pyx_L1_error)
if (!__pyx_t_1) {
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
} else {
__pyx_t_12 = __Pyx_PyIndex_AsSsize_t(__pyx_t_9); if (unlikely((__pyx_t_12 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 756, __pyx_L1_error)
__pyx_t_10 = __pyx_t_12;
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
goto __pyx_L7_bool_binop_done;
}
__pyx_t_10 = 0;
__pyx_L7_bool_binop_done:;
__pyx_v_start = __pyx_t_10;
/* "View.MemoryView":757
* else:
* start = index.start or 0
* stop = index.stop or 0 # <<<<<<<<<<<<<<
* step = index.step or 0
*
*/
__pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_stop); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 757, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 757, __pyx_L1_error)
if (!__pyx_t_1) {
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
} else {
__pyx_t_12 = __Pyx_PyIndex_AsSsize_t(__pyx_t_9); if (unlikely((__pyx_t_12 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 757, __pyx_L1_error)
__pyx_t_10 = __pyx_t_12;
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
goto __pyx_L9_bool_binop_done;
}
__pyx_t_10 = 0;
__pyx_L9_bool_binop_done:;
__pyx_v_stop = __pyx_t_10;
/* "View.MemoryView":758
* start = index.start or 0
* stop = index.stop or 0
* step = index.step or 0 # <<<<<<<<<<<<<<
*
* have_start = index.start is not None
*/
__pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_step); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 758, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 758, __pyx_L1_error)
if (!__pyx_t_1) {
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
} else {
__pyx_t_12 = __Pyx_PyIndex_AsSsize_t(__pyx_t_9); if (unlikely((__pyx_t_12 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 758, __pyx_L1_error)
__pyx_t_10 = __pyx_t_12;
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
goto __pyx_L11_bool_binop_done;
}
__pyx_t_10 = 0;
__pyx_L11_bool_binop_done:;
__pyx_v_step = __pyx_t_10;
/* "View.MemoryView":760
* step = index.step or 0
*
* have_start = index.start is not None # <<<<<<<<<<<<<<
* have_stop = index.stop is not None
* have_step = index.step is not None
*/
__pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_start); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 760, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__pyx_t_1 = (__pyx_t_9 != Py_None);
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
__pyx_v_have_start = __pyx_t_1;
/* "View.MemoryView":761
*
* have_start = index.start is not None
* have_stop = index.stop is not None # <<<<<<<<<<<<<<
* have_step = index.step is not None
*
*/
__pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_stop); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 761, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__pyx_t_1 = (__pyx_t_9 != Py_None);
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
__pyx_v_have_stop = __pyx_t_1;
/* "View.MemoryView":762
* have_start = index.start is not None
* have_stop = index.stop is not None
* have_step = index.step is not None # <<<<<<<<<<<<<<
*
* slice_memviewslice(
*/
__pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_step); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 762, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__pyx_t_1 = (__pyx_t_9 != Py_None);
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
__pyx_v_have_step = __pyx_t_1;
/* "View.MemoryView":764
* have_step = index.step is not None
*
* slice_memviewslice( # <<<<<<<<<<<<<<
* p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim],
* dim, new_ndim, p_suboffset_dim,
*/
__pyx_t_11 = __pyx_memoryview_slice_memviewslice(__pyx_v_p_dst, (__pyx_v_p_src->shape[__pyx_v_dim]), (__pyx_v_p_src->strides[__pyx_v_dim]), (__pyx_v_p_src->suboffsets[__pyx_v_dim]), __pyx_v_dim, __pyx_v_new_ndim, __pyx_v_p_suboffset_dim, __pyx_v_start, __pyx_v_stop, __pyx_v_step, __pyx_v_have_start, __pyx_v_have_stop, __pyx_v_have_step, 1); if (unlikely(__pyx_t_11 == ((int)-1))) __PYX_ERR(1, 764, __pyx_L1_error)
/* "View.MemoryView":770
* have_start, have_stop, have_step,
* True)
* new_ndim += 1 # <<<<<<<<<<<<<<
*
* if isinstance(memview, _memoryviewslice):
*/
__pyx_v_new_ndim = (__pyx_v_new_ndim + 1);
}
__pyx_L6:;
/* "View.MemoryView":742
* cdef bint have_start, have_stop, have_step
*
* for dim, index in enumerate(indices): # <<<<<<<<<<<<<<
* if PyIndex_Check(index):
* slice_memviewslice(
*/
}
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
/* "View.MemoryView":772
* new_ndim += 1
*
* if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<<
* return memoryview_fromslice(dst, new_ndim,
* memviewsliceobj.to_object_func,
*/
__pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type);
__pyx_t_2 = (__pyx_t_1 != 0);
if (__pyx_t_2) {
/* "View.MemoryView":773
*
* if isinstance(memview, _memoryviewslice):
* return memoryview_fromslice(dst, new_ndim, # <<<<<<<<<<<<<<
* memviewsliceobj.to_object_func,
* memviewsliceobj.to_dtype_func,
*/
__Pyx_XDECREF(((PyObject *)__pyx_r));
/* "View.MemoryView":774
* if isinstance(memview, _memoryviewslice):
* return memoryview_fromslice(dst, new_ndim,
* memviewsliceobj.to_object_func, # <<<<<<<<<<<<<<
* memviewsliceobj.to_dtype_func,
* memview.dtype_is_object)
*/
if (unlikely(!__pyx_v_memviewsliceobj)) { __Pyx_RaiseUnboundLocalError("memviewsliceobj"); __PYX_ERR(1, 774, __pyx_L1_error) }
/* "View.MemoryView":775
* return memoryview_fromslice(dst, new_ndim,
* memviewsliceobj.to_object_func,
* memviewsliceobj.to_dtype_func, # <<<<<<<<<<<<<<
* memview.dtype_is_object)
* else:
*/
if (unlikely(!__pyx_v_memviewsliceobj)) { __Pyx_RaiseUnboundLocalError("memviewsliceobj"); __PYX_ERR(1, 775, __pyx_L1_error) }
/* "View.MemoryView":773
*
* if isinstance(memview, _memoryviewslice):
* return memoryview_fromslice(dst, new_ndim, # <<<<<<<<<<<<<<
* memviewsliceobj.to_object_func,
* memviewsliceobj.to_dtype_func,
*/
__pyx_t_3 = __pyx_memoryview_fromslice(__pyx_v_dst, __pyx_v_new_ndim, __pyx_v_memviewsliceobj->to_object_func, __pyx_v_memviewsliceobj->to_dtype_func, __pyx_v_memview->dtype_is_object); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 773, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_memoryview_type))))) __PYX_ERR(1, 773, __pyx_L1_error)
__pyx_r = ((struct __pyx_memoryview_obj *)__pyx_t_3);
__pyx_t_3 = 0;
goto __pyx_L0;
/* "View.MemoryView":772
* new_ndim += 1
*
* if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<<
* return memoryview_fromslice(dst, new_ndim,
* memviewsliceobj.to_object_func,
*/
}
/* "View.MemoryView":778
* memview.dtype_is_object)
* else:
* return memoryview_fromslice(dst, new_ndim, NULL, NULL, # <<<<<<<<<<<<<<
* memview.dtype_is_object)
*
*/
/*else*/ {
__Pyx_XDECREF(((PyObject *)__pyx_r));
/* "View.MemoryView":779
* else:
* return memoryview_fromslice(dst, new_ndim, NULL, NULL,
* memview.dtype_is_object) # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_3 = __pyx_memoryview_fromslice(__pyx_v_dst, __pyx_v_new_ndim, NULL, NULL, __pyx_v_memview->dtype_is_object); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 778, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
/* "View.MemoryView":778
* memview.dtype_is_object)
* else:
* return memoryview_fromslice(dst, new_ndim, NULL, NULL, # <<<<<<<<<<<<<<
* memview.dtype_is_object)
*
*/
if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_memoryview_type))))) __PYX_ERR(1, 778, __pyx_L1_error)
__pyx_r = ((struct __pyx_memoryview_obj *)__pyx_t_3);
__pyx_t_3 = 0;
goto __pyx_L0;
}
/* "View.MemoryView":706
*
* @cname('__pyx_memview_slice')
* cdef memoryview memview_slice(memoryview memview, object indices): # <<<<<<<<<<<<<<
* cdef int new_ndim = 0, suboffset_dim = -1, dim
* cdef bint negative_step
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_9);
__Pyx_AddTraceback("View.MemoryView.memview_slice", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XDECREF((PyObject *)__pyx_v_memviewsliceobj);
__Pyx_XDECREF(__pyx_v_index);
__Pyx_XGIVEREF((PyObject *)__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
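/* Editorial note (assumption): the loop above dispatches on the kind of
 * each entry in `indices` -- an integral index calls slice_memviewslice
 * with is_slice == 0, consuming a source dimension without producing a
 * destination one; None inserts a fresh length-1 dimension (shape 1,
 * stride 0, suboffset -1); a slice object is unpacked into
 * start/stop/step plus have_* flags and maps one source dimension to
 * one destination dimension. new_ndim counts only the dimensions
 * actually emitted, so a 3-D view indexed with
 * (1, slice(None), slice(None)) comes back 2-D. */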
/* "View.MemoryView":803
*
* @cname('__pyx_memoryview_slice_memviewslice')
* cdef int slice_memviewslice( # <<<<<<<<<<<<<<
* __Pyx_memviewslice *dst,
* Py_ssize_t shape, Py_ssize_t stride, Py_ssize_t suboffset,
*/
static int __pyx_memoryview_slice_memviewslice(__Pyx_memviewslice *__pyx_v_dst, Py_ssize_t __pyx_v_shape, Py_ssize_t __pyx_v_stride, Py_ssize_t __pyx_v_suboffset, int __pyx_v_dim, int __pyx_v_new_ndim, int *__pyx_v_suboffset_dim, Py_ssize_t __pyx_v_start, Py_ssize_t __pyx_v_stop, Py_ssize_t __pyx_v_step, int __pyx_v_have_start, int __pyx_v_have_stop, int __pyx_v_have_step, int __pyx_v_is_slice) {
Py_ssize_t __pyx_v_new_shape;
int __pyx_v_negative_step;
int __pyx_r;
int __pyx_t_1;
int __pyx_t_2;
int __pyx_t_3;
/* "View.MemoryView":823
* cdef bint negative_step
*
* if not is_slice: # <<<<<<<<<<<<<<
*
* if start < 0:
*/
__pyx_t_1 = ((!(__pyx_v_is_slice != 0)) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":825
* if not is_slice:
*
* if start < 0: # <<<<<<<<<<<<<<
* start += shape
* if not 0 <= start < shape:
*/
__pyx_t_1 = ((__pyx_v_start < 0) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":826
*
* if start < 0:
* start += shape # <<<<<<<<<<<<<<
* if not 0 <= start < shape:
* _err_dim(IndexError, "Index out of bounds (axis %d)", dim)
*/
__pyx_v_start = (__pyx_v_start + __pyx_v_shape);
/* "View.MemoryView":825
* if not is_slice:
*
* if start < 0: # <<<<<<<<<<<<<<
* start += shape
* if not 0 <= start < shape:
*/
}
/* "View.MemoryView":827
* if start < 0:
* start += shape
* if not 0 <= start < shape: # <<<<<<<<<<<<<<
* _err_dim(IndexError, "Index out of bounds (axis %d)", dim)
* else:
*/
__pyx_t_1 = (0 <= __pyx_v_start);
if (__pyx_t_1) {
__pyx_t_1 = (__pyx_v_start < __pyx_v_shape);
}
__pyx_t_2 = ((!(__pyx_t_1 != 0)) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":828
* start += shape
* if not 0 <= start < shape:
* _err_dim(IndexError, "Index out of bounds (axis %d)", dim) # <<<<<<<<<<<<<<
* else:
*
*/
__pyx_t_3 = __pyx_memoryview_err_dim(__pyx_builtin_IndexError, ((char *)"Index out of bounds (axis %d)"), __pyx_v_dim); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(1, 828, __pyx_L1_error)
/* "View.MemoryView":827
* if start < 0:
* start += shape
* if not 0 <= start < shape: # <<<<<<<<<<<<<<
* _err_dim(IndexError, "Index out of bounds (axis %d)", dim)
* else:
*/
}
/* "View.MemoryView":823
* cdef bint negative_step
*
* if not is_slice: # <<<<<<<<<<<<<<
*
* if start < 0:
*/
goto __pyx_L3;
}
/* "View.MemoryView":831
* else:
*
* negative_step = have_step != 0 and step < 0 # <<<<<<<<<<<<<<
*
* if have_step and step == 0:
*/
/*else*/ {
__pyx_t_1 = ((__pyx_v_have_step != 0) != 0);
if (__pyx_t_1) {
} else {
__pyx_t_2 = __pyx_t_1;
goto __pyx_L6_bool_binop_done;
}
__pyx_t_1 = ((__pyx_v_step < 0) != 0);
__pyx_t_2 = __pyx_t_1;
__pyx_L6_bool_binop_done:;
__pyx_v_negative_step = __pyx_t_2;
/* "View.MemoryView":833
* negative_step = have_step != 0 and step < 0
*
* if have_step and step == 0: # <<<<<<<<<<<<<<
* _err_dim(ValueError, "Step may not be zero (axis %d)", dim)
*
*/
__pyx_t_1 = (__pyx_v_have_step != 0);
if (__pyx_t_1) {
} else {
__pyx_t_2 = __pyx_t_1;
goto __pyx_L9_bool_binop_done;
}
__pyx_t_1 = ((__pyx_v_step == 0) != 0);
__pyx_t_2 = __pyx_t_1;
__pyx_L9_bool_binop_done:;
if (__pyx_t_2) {
/* "View.MemoryView":834
*
* if have_step and step == 0:
* _err_dim(ValueError, "Step may not be zero (axis %d)", dim) # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_3 = __pyx_memoryview_err_dim(__pyx_builtin_ValueError, ((char *)"Step may not be zero (axis %d)"), __pyx_v_dim); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(1, 834, __pyx_L1_error)
/* "View.MemoryView":833
* negative_step = have_step != 0 and step < 0
*
* if have_step and step == 0: # <<<<<<<<<<<<<<
* _err_dim(ValueError, "Step may not be zero (axis %d)", dim)
*
*/
}
/* "View.MemoryView":837
*
*
* if have_start: # <<<<<<<<<<<<<<
* if start < 0:
* start += shape
*/
__pyx_t_2 = (__pyx_v_have_start != 0);
if (__pyx_t_2) {
/* "View.MemoryView":838
*
* if have_start:
* if start < 0: # <<<<<<<<<<<<<<
* start += shape
* if start < 0:
*/
__pyx_t_2 = ((__pyx_v_start < 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":839
* if have_start:
* if start < 0:
* start += shape # <<<<<<<<<<<<<<
* if start < 0:
* start = 0
*/
__pyx_v_start = (__pyx_v_start + __pyx_v_shape);
/* "View.MemoryView":840
* if start < 0:
* start += shape
* if start < 0: # <<<<<<<<<<<<<<
* start = 0
* elif start >= shape:
*/
__pyx_t_2 = ((__pyx_v_start < 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":841
* start += shape
* if start < 0:
* start = 0 # <<<<<<<<<<<<<<
* elif start >= shape:
* if negative_step:
*/
__pyx_v_start = 0;
/* "View.MemoryView":840
* if start < 0:
* start += shape
* if start < 0: # <<<<<<<<<<<<<<
* start = 0
* elif start >= shape:
*/
}
/* "View.MemoryView":838
*
* if have_start:
* if start < 0: # <<<<<<<<<<<<<<
* start += shape
* if start < 0:
*/
goto __pyx_L12;
}
/* "View.MemoryView":842
* if start < 0:
* start = 0
* elif start >= shape: # <<<<<<<<<<<<<<
* if negative_step:
* start = shape - 1
*/
__pyx_t_2 = ((__pyx_v_start >= __pyx_v_shape) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":843
* start = 0
* elif start >= shape:
* if negative_step: # <<<<<<<<<<<<<<
* start = shape - 1
* else:
*/
__pyx_t_2 = (__pyx_v_negative_step != 0);
if (__pyx_t_2) {
/* "View.MemoryView":844
* elif start >= shape:
* if negative_step:
* start = shape - 1 # <<<<<<<<<<<<<<
* else:
* start = shape
*/
__pyx_v_start = (__pyx_v_shape - 1);
/* "View.MemoryView":843
* start = 0
* elif start >= shape:
* if negative_step: # <<<<<<<<<<<<<<
* start = shape - 1
* else:
*/
goto __pyx_L14;
}
/* "View.MemoryView":846
* start = shape - 1
* else:
* start = shape # <<<<<<<<<<<<<<
* else:
* if negative_step:
*/
/*else*/ {
__pyx_v_start = __pyx_v_shape;
}
__pyx_L14:;
/* "View.MemoryView":842
* if start < 0:
* start = 0
* elif start >= shape: # <<<<<<<<<<<<<<
* if negative_step:
* start = shape - 1
*/
}
__pyx_L12:;
/* "View.MemoryView":837
*
*
* if have_start: # <<<<<<<<<<<<<<
* if start < 0:
* start += shape
*/
goto __pyx_L11;
}
/* "View.MemoryView":848
* start = shape
* else:
* if negative_step: # <<<<<<<<<<<<<<
* start = shape - 1
* else:
*/
/*else*/ {
__pyx_t_2 = (__pyx_v_negative_step != 0);
if (__pyx_t_2) {
/* "View.MemoryView":849
* else:
* if negative_step:
* start = shape - 1 # <<<<<<<<<<<<<<
* else:
* start = 0
*/
__pyx_v_start = (__pyx_v_shape - 1);
/* "View.MemoryView":848
* start = shape
* else:
* if negative_step: # <<<<<<<<<<<<<<
* start = shape - 1
* else:
*/
goto __pyx_L15;
}
/* "View.MemoryView":851
* start = shape - 1
* else:
* start = 0 # <<<<<<<<<<<<<<
*
* if have_stop:
*/
/*else*/ {
__pyx_v_start = 0;
}
__pyx_L15:;
}
__pyx_L11:;
/* "View.MemoryView":853
* start = 0
*
* if have_stop: # <<<<<<<<<<<<<<
* if stop < 0:
* stop += shape
*/
__pyx_t_2 = (__pyx_v_have_stop != 0);
if (__pyx_t_2) {
/* "View.MemoryView":854
*
* if have_stop:
* if stop < 0: # <<<<<<<<<<<<<<
* stop += shape
* if stop < 0:
*/
__pyx_t_2 = ((__pyx_v_stop < 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":855
* if have_stop:
* if stop < 0:
* stop += shape # <<<<<<<<<<<<<<
* if stop < 0:
* stop = 0
*/
__pyx_v_stop = (__pyx_v_stop + __pyx_v_shape);
/* "View.MemoryView":856
* if stop < 0:
* stop += shape
* if stop < 0: # <<<<<<<<<<<<<<
* stop = 0
* elif stop > shape:
*/
__pyx_t_2 = ((__pyx_v_stop < 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":857
* stop += shape
* if stop < 0:
* stop = 0 # <<<<<<<<<<<<<<
* elif stop > shape:
* stop = shape
*/
__pyx_v_stop = 0;
/* "View.MemoryView":856
* if stop < 0:
* stop += shape
* if stop < 0: # <<<<<<<<<<<<<<
* stop = 0
* elif stop > shape:
*/
}
/* "View.MemoryView":854
*
* if have_stop:
* if stop < 0: # <<<<<<<<<<<<<<
* stop += shape
* if stop < 0:
*/
goto __pyx_L17;
}
/* "View.MemoryView":858
* if stop < 0:
* stop = 0
* elif stop > shape: # <<<<<<<<<<<<<<
* stop = shape
* else:
*/
__pyx_t_2 = ((__pyx_v_stop > __pyx_v_shape) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":859
* stop = 0
* elif stop > shape:
* stop = shape # <<<<<<<<<<<<<<
* else:
* if negative_step:
*/
__pyx_v_stop = __pyx_v_shape;
/* "View.MemoryView":858
* if stop < 0:
* stop = 0
* elif stop > shape: # <<<<<<<<<<<<<<
* stop = shape
* else:
*/
}
__pyx_L17:;
/* "View.MemoryView":853
* start = 0
*
* if have_stop: # <<<<<<<<<<<<<<
* if stop < 0:
* stop += shape
*/
goto __pyx_L16;
}
/* "View.MemoryView":861
* stop = shape
* else:
* if negative_step: # <<<<<<<<<<<<<<
* stop = -1
* else:
*/
/*else*/ {
__pyx_t_2 = (__pyx_v_negative_step != 0);
if (__pyx_t_2) {
/* "View.MemoryView":862
* else:
* if negative_step:
* stop = -1 # <<<<<<<<<<<<<<
* else:
* stop = shape
*/
__pyx_v_stop = -1L;
/* "View.MemoryView":861
* stop = shape
* else:
* if negative_step: # <<<<<<<<<<<<<<
* stop = -1
* else:
*/
goto __pyx_L19;
}
/* "View.MemoryView":864
* stop = -1
* else:
* stop = shape # <<<<<<<<<<<<<<
*
* if not have_step:
*/
/*else*/ {
__pyx_v_stop = __pyx_v_shape;
}
__pyx_L19:;
}
__pyx_L16:;
/* "View.MemoryView":866
* stop = shape
*
* if not have_step: # <<<<<<<<<<<<<<
* step = 1
*
*/
__pyx_t_2 = ((!(__pyx_v_have_step != 0)) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":867
*
* if not have_step:
* step = 1 # <<<<<<<<<<<<<<
*
*
*/
__pyx_v_step = 1;
/* "View.MemoryView":866
* stop = shape
*
* if not have_step: # <<<<<<<<<<<<<<
* step = 1
*
*/
}
/* "View.MemoryView":871
*
* with cython.cdivision(True):
* new_shape = (stop - start) // step # <<<<<<<<<<<<<<
*
* if (stop - start) - step * new_shape:
*/
__pyx_v_new_shape = ((__pyx_v_stop - __pyx_v_start) / __pyx_v_step);
/* "View.MemoryView":873
* new_shape = (stop - start) // step
*
* if (stop - start) - step * new_shape: # <<<<<<<<<<<<<<
* new_shape += 1
*
*/
__pyx_t_2 = (((__pyx_v_stop - __pyx_v_start) - (__pyx_v_step * __pyx_v_new_shape)) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":874
*
* if (stop - start) - step * new_shape:
* new_shape += 1 # <<<<<<<<<<<<<<
*
* if new_shape < 0:
*/
__pyx_v_new_shape = (__pyx_v_new_shape + 1);
/* "View.MemoryView":873
* new_shape = (stop - start) // step
*
* if (stop - start) - step * new_shape: # <<<<<<<<<<<<<<
* new_shape += 1
*
*/
}
/* "View.MemoryView":876
* new_shape += 1
*
* if new_shape < 0: # <<<<<<<<<<<<<<
* new_shape = 0
*
*/
__pyx_t_2 = ((__pyx_v_new_shape < 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":877
*
* if new_shape < 0:
* new_shape = 0 # <<<<<<<<<<<<<<
*
*
*/
__pyx_v_new_shape = 0;
/* "View.MemoryView":876
* new_shape += 1
*
* if new_shape < 0: # <<<<<<<<<<<<<<
* new_shape = 0
*
*/
}
/* "View.MemoryView":880
*
*
* dst.strides[new_ndim] = stride * step # <<<<<<<<<<<<<<
* dst.shape[new_ndim] = new_shape
* dst.suboffsets[new_ndim] = suboffset
*/
(__pyx_v_dst->strides[__pyx_v_new_ndim]) = (__pyx_v_stride * __pyx_v_step);
/* "View.MemoryView":881
*
* dst.strides[new_ndim] = stride * step
* dst.shape[new_ndim] = new_shape # <<<<<<<<<<<<<<
* dst.suboffsets[new_ndim] = suboffset
*
*/
(__pyx_v_dst->shape[__pyx_v_new_ndim]) = __pyx_v_new_shape;
/* "View.MemoryView":882
* dst.strides[new_ndim] = stride * step
* dst.shape[new_ndim] = new_shape
* dst.suboffsets[new_ndim] = suboffset # <<<<<<<<<<<<<<
*
*
*/
(__pyx_v_dst->suboffsets[__pyx_v_new_ndim]) = __pyx_v_suboffset;
}
__pyx_L3:;
/* "View.MemoryView":885
*
*
* if suboffset_dim[0] < 0: # <<<<<<<<<<<<<<
* dst.data += start * stride
* else:
*/
__pyx_t_2 = (((__pyx_v_suboffset_dim[0]) < 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":886
*
* if suboffset_dim[0] < 0:
* dst.data += start * stride # <<<<<<<<<<<<<<
* else:
* dst.suboffsets[suboffset_dim[0]] += start * stride
*/
__pyx_v_dst->data = (__pyx_v_dst->data + (__pyx_v_start * __pyx_v_stride));
/* "View.MemoryView":885
*
*
* if suboffset_dim[0] < 0: # <<<<<<<<<<<<<<
* dst.data += start * stride
* else:
*/
goto __pyx_L23;
}
/* "View.MemoryView":888
* dst.data += start * stride
* else:
* dst.suboffsets[suboffset_dim[0]] += start * stride # <<<<<<<<<<<<<<
*
* if suboffset >= 0:
*/
/*else*/ {
__pyx_t_3 = (__pyx_v_suboffset_dim[0]);
(__pyx_v_dst->suboffsets[__pyx_t_3]) = ((__pyx_v_dst->suboffsets[__pyx_t_3]) + (__pyx_v_start * __pyx_v_stride));
}
__pyx_L23:;
/* "View.MemoryView":890
* dst.suboffsets[suboffset_dim[0]] += start * stride
*
* if suboffset >= 0: # <<<<<<<<<<<<<<
* if not is_slice:
* if new_ndim == 0:
*/
__pyx_t_2 = ((__pyx_v_suboffset >= 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":891
*
* if suboffset >= 0:
* if not is_slice: # <<<<<<<<<<<<<<
* if new_ndim == 0:
* dst.data = (<char **> dst.data)[0] + suboffset
*/
__pyx_t_2 = ((!(__pyx_v_is_slice != 0)) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":892
* if suboffset >= 0:
* if not is_slice:
* if new_ndim == 0: # <<<<<<<<<<<<<<
* dst.data = (<char **> dst.data)[0] + suboffset
* else:
*/
__pyx_t_2 = ((__pyx_v_new_ndim == 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":893
* if not is_slice:
* if new_ndim == 0:
* dst.data = (<char **> dst.data)[0] + suboffset # <<<<<<<<<<<<<<
* else:
* _err_dim(IndexError, "All dimensions preceding dimension %d "
*/
__pyx_v_dst->data = ((((char **)__pyx_v_dst->data)[0]) + __pyx_v_suboffset);
/* "View.MemoryView":892
* if suboffset >= 0:
* if not is_slice:
* if new_ndim == 0: # <<<<<<<<<<<<<<
* dst.data = (<char **> dst.data)[0] + suboffset
* else:
*/
goto __pyx_L26;
}
/* "View.MemoryView":895
* dst.data = (<char **> dst.data)[0] + suboffset
* else:
* _err_dim(IndexError, "All dimensions preceding dimension %d " # <<<<<<<<<<<<<<
* "must be indexed and not sliced", dim)
* else:
*/
/*else*/ {
/* "View.MemoryView":896
* else:
* _err_dim(IndexError, "All dimensions preceding dimension %d "
* "must be indexed and not sliced", dim) # <<<<<<<<<<<<<<
* else:
* suboffset_dim[0] = new_ndim
*/
__pyx_t_3 = __pyx_memoryview_err_dim(__pyx_builtin_IndexError, ((char *)"All dimensions preceding dimension %d must be indexed and not sliced"), __pyx_v_dim); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(1, 895, __pyx_L1_error)
}
__pyx_L26:;
/* "View.MemoryView":891
*
* if suboffset >= 0:
* if not is_slice: # <<<<<<<<<<<<<<
* if new_ndim == 0:
* dst.data = (<char **> dst.data)[0] + suboffset
*/
goto __pyx_L25;
}
/* "View.MemoryView":898
* "must be indexed and not sliced", dim)
* else:
* suboffset_dim[0] = new_ndim # <<<<<<<<<<<<<<
*
* return 0
*/
/*else*/ {
(__pyx_v_suboffset_dim[0]) = __pyx_v_new_ndim;
}
__pyx_L25:;
/* "View.MemoryView":890
* dst.suboffsets[suboffset_dim[0]] += start * stride
*
* if suboffset >= 0: # <<<<<<<<<<<<<<
* if not is_slice:
* if new_ndim == 0:
*/
}
/* "View.MemoryView":900
* suboffset_dim[0] = new_ndim
*
* return 0 # <<<<<<<<<<<<<<
*
*
*/
__pyx_r = 0;
goto __pyx_L0;
/* "View.MemoryView":803
*
* @cname('__pyx_memoryview_slice_memviewslice')
* cdef int slice_memviewslice( # <<<<<<<<<<<<<<
* __Pyx_memviewslice *dst,
* Py_ssize_t shape, Py_ssize_t stride, Py_ssize_t suboffset,
*/
/* function exit code */
__pyx_L1_error:;
{
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
#endif
__Pyx_AddTraceback("View.MemoryView.slice_memviewslice", __pyx_clineno, __pyx_lineno, __pyx_filename);
#ifdef WITH_THREAD
__Pyx_PyGILState_Release(__pyx_gilstate_save);
#endif
}
__pyx_r = -1;
__pyx_L0:;
return __pyx_r;
}
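/* Editorial worked example: for shape == 10 and the slice 1:8:3, the
 * arithmetic above gives new_shape = (8 - 1) / 3 == 2 (C truncating
 * division under cdivision), and since (8 - 1) - 3 * 2 == 1 is nonzero,
 * new_shape is bumped to 3 -- matching len(range(1, 8, 3)) == 3, i.e.
 * indices 1, 4, 7. The new stride becomes stride * 3 and, the dimension
 * being direct (suboffset_dim[0] < 0), dst.data advances by
 * start * stride == 1 * stride. */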
/* "View.MemoryView":906
*
* @cname('__pyx_pybuffer_index')
* cdef char *pybuffer_index(Py_buffer *view, char *bufp, Py_ssize_t index, # <<<<<<<<<<<<<<
* Py_ssize_t dim) except NULL:
* cdef Py_ssize_t shape, stride, suboffset = -1
*/
static char *__pyx_pybuffer_index(Py_buffer *__pyx_v_view, char *__pyx_v_bufp, Py_ssize_t __pyx_v_index, Py_ssize_t __pyx_v_dim) {
Py_ssize_t __pyx_v_shape;
Py_ssize_t __pyx_v_stride;
Py_ssize_t __pyx_v_suboffset;
Py_ssize_t __pyx_v_itemsize;
char *__pyx_v_resultp;
char *__pyx_r;
__Pyx_RefNannyDeclarations
Py_ssize_t __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
__Pyx_RefNannySetupContext("pybuffer_index", 0);
/* "View.MemoryView":908
* cdef char *pybuffer_index(Py_buffer *view, char *bufp, Py_ssize_t index,
* Py_ssize_t dim) except NULL:
* cdef Py_ssize_t shape, stride, suboffset = -1 # <<<<<<<<<<<<<<
* cdef Py_ssize_t itemsize = view.itemsize
* cdef char *resultp
*/
__pyx_v_suboffset = -1L;
/* "View.MemoryView":909
* Py_ssize_t dim) except NULL:
* cdef Py_ssize_t shape, stride, suboffset = -1
* cdef Py_ssize_t itemsize = view.itemsize # <<<<<<<<<<<<<<
* cdef char *resultp
*
*/
__pyx_t_1 = __pyx_v_view->itemsize;
__pyx_v_itemsize = __pyx_t_1;
/* "View.MemoryView":912
* cdef char *resultp
*
* if view.ndim == 0: # <<<<<<<<<<<<<<
* shape = view.len / itemsize
* stride = itemsize
*/
__pyx_t_2 = ((__pyx_v_view->ndim == 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":913
*
* if view.ndim == 0:
* shape = view.len / itemsize # <<<<<<<<<<<<<<
* stride = itemsize
* else:
*/
if (unlikely(__pyx_v_itemsize == 0)) {
PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero");
__PYX_ERR(1, 913, __pyx_L1_error)
}
else if (sizeof(Py_ssize_t) == sizeof(long) && (!(((Py_ssize_t)-1) > 0)) && unlikely(__pyx_v_itemsize == (Py_ssize_t)-1) && unlikely(UNARY_NEG_WOULD_OVERFLOW(__pyx_v_view->len))) {
PyErr_SetString(PyExc_OverflowError, "value too large to perform division");
__PYX_ERR(1, 913, __pyx_L1_error)
}
__pyx_v_shape = (__pyx_v_view->len / __pyx_v_itemsize);
/* "View.MemoryView":914
* if view.ndim == 0:
* shape = view.len / itemsize
* stride = itemsize # <<<<<<<<<<<<<<
* else:
* shape = view.shape[dim]
*/
__pyx_v_stride = __pyx_v_itemsize;
/* "View.MemoryView":912
* cdef char *resultp
*
* if view.ndim == 0: # <<<<<<<<<<<<<<
* shape = view.len / itemsize
* stride = itemsize
*/
goto __pyx_L3;
}
/* "View.MemoryView":916
* stride = itemsize
* else:
* shape = view.shape[dim] # <<<<<<<<<<<<<<
* stride = view.strides[dim]
* if view.suboffsets != NULL:
*/
/*else*/ {
__pyx_v_shape = (__pyx_v_view->shape[__pyx_v_dim]);
/* "View.MemoryView":917
* else:
* shape = view.shape[dim]
* stride = view.strides[dim] # <<<<<<<<<<<<<<
* if view.suboffsets != NULL:
* suboffset = view.suboffsets[dim]
*/
__pyx_v_stride = (__pyx_v_view->strides[__pyx_v_dim]);
/* "View.MemoryView":918
* shape = view.shape[dim]
* stride = view.strides[dim]
* if view.suboffsets != NULL: # <<<<<<<<<<<<<<
* suboffset = view.suboffsets[dim]
*
*/
__pyx_t_2 = ((__pyx_v_view->suboffsets != NULL) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":919
* stride = view.strides[dim]
* if view.suboffsets != NULL:
* suboffset = view.suboffsets[dim] # <<<<<<<<<<<<<<
*
* if index < 0:
*/
__pyx_v_suboffset = (__pyx_v_view->suboffsets[__pyx_v_dim]);
/* "View.MemoryView":918
* shape = view.shape[dim]
* stride = view.strides[dim]
* if view.suboffsets != NULL: # <<<<<<<<<<<<<<
* suboffset = view.suboffsets[dim]
*
*/
}
}
__pyx_L3:;
/* "View.MemoryView":921
* suboffset = view.suboffsets[dim]
*
* if index < 0: # <<<<<<<<<<<<<<
* index += view.shape[dim]
* if index < 0:
*/
__pyx_t_2 = ((__pyx_v_index < 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":922
*
* if index < 0:
* index += view.shape[dim] # <<<<<<<<<<<<<<
* if index < 0:
* raise IndexError("Out of bounds on buffer access (axis %d)" % dim)
*/
__pyx_v_index = (__pyx_v_index + (__pyx_v_view->shape[__pyx_v_dim]));
/* "View.MemoryView":923
* if index < 0:
* index += view.shape[dim]
* if index < 0: # <<<<<<<<<<<<<<
* raise IndexError("Out of bounds on buffer access (axis %d)" % dim)
*
*/
__pyx_t_2 = ((__pyx_v_index < 0) != 0);
if (unlikely(__pyx_t_2)) {
/* "View.MemoryView":924
* index += view.shape[dim]
* if index < 0:
* raise IndexError("Out of bounds on buffer access (axis %d)" % dim) # <<<<<<<<<<<<<<
*
* if index >= shape:
*/
__pyx_t_3 = PyInt_FromSsize_t(__pyx_v_dim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 924, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = __Pyx_PyString_Format(__pyx_kp_s_Out_of_bounds_on_buffer_access_a, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 924, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_3 = __Pyx_PyObject_CallOneArg(__pyx_builtin_IndexError, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 924, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__PYX_ERR(1, 924, __pyx_L1_error)
/* "View.MemoryView":923
* if index < 0:
* index += view.shape[dim]
* if index < 0: # <<<<<<<<<<<<<<
* raise IndexError("Out of bounds on buffer access (axis %d)" % dim)
*
*/
}
/* "View.MemoryView":921
* suboffset = view.suboffsets[dim]
*
* if index < 0: # <<<<<<<<<<<<<<
* index += view.shape[dim]
* if index < 0:
*/
}
/* "View.MemoryView":926
* raise IndexError("Out of bounds on buffer access (axis %d)" % dim)
*
* if index >= shape: # <<<<<<<<<<<<<<
* raise IndexError("Out of bounds on buffer access (axis %d)" % dim)
*
*/
__pyx_t_2 = ((__pyx_v_index >= __pyx_v_shape) != 0);
if (unlikely(__pyx_t_2)) {
/* "View.MemoryView":927
*
* if index >= shape:
* raise IndexError("Out of bounds on buffer access (axis %d)" % dim) # <<<<<<<<<<<<<<
*
* resultp = bufp + index * stride
*/
__pyx_t_3 = PyInt_FromSsize_t(__pyx_v_dim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 927, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = __Pyx_PyString_Format(__pyx_kp_s_Out_of_bounds_on_buffer_access_a, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 927, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_3 = __Pyx_PyObject_CallOneArg(__pyx_builtin_IndexError, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 927, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__PYX_ERR(1, 927, __pyx_L1_error)
/* "View.MemoryView":926
* raise IndexError("Out of bounds on buffer access (axis %d)" % dim)
*
* if index >= shape: # <<<<<<<<<<<<<<
* raise IndexError("Out of bounds on buffer access (axis %d)" % dim)
*
*/
}
/* "View.MemoryView":929
* raise IndexError("Out of bounds on buffer access (axis %d)" % dim)
*
* resultp = bufp + index * stride # <<<<<<<<<<<<<<
* if suboffset >= 0:
* resultp = (<char **> resultp)[0] + suboffset
*/
__pyx_v_resultp = (__pyx_v_bufp + (__pyx_v_index * __pyx_v_stride));
/* "View.MemoryView":930
*
* resultp = bufp + index * stride
* if suboffset >= 0: # <<<<<<<<<<<<<<
* resultp = (<char **> resultp)[0] + suboffset
*
*/
__pyx_t_2 = ((__pyx_v_suboffset >= 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":931
* resultp = bufp + index * stride
* if suboffset >= 0:
* resultp = (<char **> resultp)[0] + suboffset # <<<<<<<<<<<<<<
*
* return resultp
*/
__pyx_v_resultp = ((((char **)__pyx_v_resultp)[0]) + __pyx_v_suboffset);
/* "View.MemoryView":930
*
* resultp = bufp + index * stride
* if suboffset >= 0: # <<<<<<<<<<<<<<
* resultp = (<char **> resultp)[0] + suboffset
*
*/
}
/* "View.MemoryView":933
* resultp = (<char **> resultp)[0] + suboffset
*
* return resultp # <<<<<<<<<<<<<<
*
*
*/
__pyx_r = __pyx_v_resultp;
goto __pyx_L0;
/* "View.MemoryView":906
*
* @cname('__pyx_pybuffer_index')
* cdef char *pybuffer_index(Py_buffer *view, char *bufp, Py_ssize_t index, # <<<<<<<<<<<<<<
* Py_ssize_t dim) except NULL:
* cdef Py_ssize_t shape, stride, suboffset = -1
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_AddTraceback("View.MemoryView.pybuffer_index", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
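/* The function above reduces buffer indexing to three steps: wrap a negative
 * index, bounds-check it against the extent, then advance by index * stride
 * and, for PIL-style indirect buffers, dereference through the suboffset.
 * A minimal standalone sketch of the same arithmetic follows; it is not part
 * of the generated module and all names in it are hypothetical. */
#include <stddef.h>

static char *demo_buffer_index(char *bufp, ptrdiff_t index, ptrdiff_t shape,
                               ptrdiff_t stride, ptrdiff_t suboffset)
{
    /* Hypothetical sketch: suboffset < 0 marks a direct dimension. */
    if (index < 0)
        index += shape;                        /* Python-style negative index */
    if (index < 0 || index >= shape)
        return NULL;                           /* out of bounds on this axis */
    char *resultp = bufp + index * stride;
    if (suboffset >= 0)                        /* indirect: resultp stores a pointer */
        resultp = *(char **)resultp + suboffset;
    return resultp;
}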
/* "View.MemoryView":939
*
* @cname('__pyx_memslice_transpose')
* cdef int transpose_memslice(__Pyx_memviewslice *memslice) nogil except 0: # <<<<<<<<<<<<<<
* cdef int ndim = memslice.memview.view.ndim
*
*/
static int __pyx_memslice_transpose(__Pyx_memviewslice *__pyx_v_memslice) {
int __pyx_v_ndim;
Py_ssize_t *__pyx_v_shape;
Py_ssize_t *__pyx_v_strides;
int __pyx_v_i;
int __pyx_v_j;
int __pyx_r;
int __pyx_t_1;
Py_ssize_t *__pyx_t_2;
long __pyx_t_3;
long __pyx_t_4;
Py_ssize_t __pyx_t_5;
Py_ssize_t __pyx_t_6;
int __pyx_t_7;
int __pyx_t_8;
int __pyx_t_9;
/* "View.MemoryView":940
* @cname('__pyx_memslice_transpose')
* cdef int transpose_memslice(__Pyx_memviewslice *memslice) nogil except 0:
* cdef int ndim = memslice.memview.view.ndim # <<<<<<<<<<<<<<
*
* cdef Py_ssize_t *shape = memslice.shape
*/
__pyx_t_1 = __pyx_v_memslice->memview->view.ndim;
__pyx_v_ndim = __pyx_t_1;
/* "View.MemoryView":942
* cdef int ndim = memslice.memview.view.ndim
*
* cdef Py_ssize_t *shape = memslice.shape # <<<<<<<<<<<<<<
* cdef Py_ssize_t *strides = memslice.strides
*
*/
__pyx_t_2 = __pyx_v_memslice->shape;
__pyx_v_shape = __pyx_t_2;
/* "View.MemoryView":943
*
* cdef Py_ssize_t *shape = memslice.shape
* cdef Py_ssize_t *strides = memslice.strides # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_2 = __pyx_v_memslice->strides;
__pyx_v_strides = __pyx_t_2;
/* "View.MemoryView":947
*
* cdef int i, j
* for i in range(ndim / 2): # <<<<<<<<<<<<<<
* j = ndim - 1 - i
* strides[i], strides[j] = strides[j], strides[i]
*/
__pyx_t_3 = (__pyx_v_ndim / 2);
__pyx_t_4 = __pyx_t_3;
for (__pyx_t_1 = 0; __pyx_t_1 < __pyx_t_4; __pyx_t_1+=1) {
__pyx_v_i = __pyx_t_1;
/* "View.MemoryView":948
* cdef int i, j
* for i in range(ndim / 2):
* j = ndim - 1 - i # <<<<<<<<<<<<<<
* strides[i], strides[j] = strides[j], strides[i]
* shape[i], shape[j] = shape[j], shape[i]
*/
__pyx_v_j = ((__pyx_v_ndim - 1) - __pyx_v_i);
/* "View.MemoryView":949
* for i in range(ndim / 2):
* j = ndim - 1 - i
* strides[i], strides[j] = strides[j], strides[i] # <<<<<<<<<<<<<<
* shape[i], shape[j] = shape[j], shape[i]
*
*/
__pyx_t_5 = (__pyx_v_strides[__pyx_v_j]);
__pyx_t_6 = (__pyx_v_strides[__pyx_v_i]);
(__pyx_v_strides[__pyx_v_i]) = __pyx_t_5;
(__pyx_v_strides[__pyx_v_j]) = __pyx_t_6;
/* "View.MemoryView":950
* j = ndim - 1 - i
* strides[i], strides[j] = strides[j], strides[i]
* shape[i], shape[j] = shape[j], shape[i] # <<<<<<<<<<<<<<
*
* if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0:
*/
__pyx_t_6 = (__pyx_v_shape[__pyx_v_j]);
__pyx_t_5 = (__pyx_v_shape[__pyx_v_i]);
(__pyx_v_shape[__pyx_v_i]) = __pyx_t_6;
(__pyx_v_shape[__pyx_v_j]) = __pyx_t_5;
/* "View.MemoryView":952
* shape[i], shape[j] = shape[j], shape[i]
*
* if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0: # <<<<<<<<<<<<<<
* _err(ValueError, "Cannot transpose memoryview with indirect dimensions")
*
*/
__pyx_t_8 = (((__pyx_v_memslice->suboffsets[__pyx_v_i]) >= 0) != 0);
if (!__pyx_t_8) {
} else {
__pyx_t_7 = __pyx_t_8;
goto __pyx_L6_bool_binop_done;
}
__pyx_t_8 = (((__pyx_v_memslice->suboffsets[__pyx_v_j]) >= 0) != 0);
__pyx_t_7 = __pyx_t_8;
__pyx_L6_bool_binop_done:;
if (__pyx_t_7) {
/* "View.MemoryView":953
*
* if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0:
* _err(ValueError, "Cannot transpose memoryview with indirect dimensions") # <<<<<<<<<<<<<<
*
* return 1
*/
__pyx_t_9 = __pyx_memoryview_err(__pyx_builtin_ValueError, ((char *)"Cannot transpose memoryview with indirect dimensions")); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(1, 953, __pyx_L1_error)
/* "View.MemoryView":952
* shape[i], shape[j] = shape[j], shape[i]
*
* if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0: # <<<<<<<<<<<<<<
* _err(ValueError, "Cannot transpose memoryview with indirect dimensions")
*
*/
}
}
/* "View.MemoryView":955
* _err(ValueError, "Cannot transpose memoryview with indirect dimensions")
*
* return 1 # <<<<<<<<<<<<<<
*
*
*/
__pyx_r = 1;
goto __pyx_L0;
/* "View.MemoryView":939
*
* @cname('__pyx_memslice_transpose')
* cdef int transpose_memslice(__Pyx_memviewslice *memslice) nogil except 0: # <<<<<<<<<<<<<<
* cdef int ndim = memslice.memview.view.ndim
*
*/
/* function exit code */
__pyx_L1_error:;
{
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
#endif
__Pyx_AddTraceback("View.MemoryView.transpose_memslice", __pyx_clineno, __pyx_lineno, __pyx_filename);
#ifdef WITH_THREAD
__Pyx_PyGILState_Release(__pyx_gilstate_save);
#endif
}
__pyx_r = 0;
__pyx_L0:;
return __pyx_r;
}
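/* transpose_memslice never touches element data: reversing the shape and
 * strides arrays in place is the whole transpose, which is why indirect
 * (suboffset) dimensions must be rejected above. A minimal standalone sketch,
 * assuming direct dimensions only (hypothetical helper, not part of the
 * generated module): */
#include <stddef.h>

static void demo_transpose_view(ptrdiff_t *shape, ptrdiff_t *strides, int ndim)
{
    int i;
    for (i = 0; i < ndim / 2; i++) {
        int j = ndim - 1 - i;
        ptrdiff_t t;
        t = strides[i]; strides[i] = strides[j]; strides[j] = t;  /* swap strides */
        t = shape[i];   shape[i]   = shape[j];   shape[j]   = t;  /* swap extents */
    }
}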
/* "View.MemoryView":972
* cdef int (*to_dtype_func)(char *, object) except 0
*
* def __dealloc__(self): # <<<<<<<<<<<<<<
* __PYX_XDEC_MEMVIEW(&self.from_slice, 1)
*
*/
/* Python wrapper */
static void __pyx_memoryviewslice___dealloc__(PyObject *__pyx_v_self); /*proto*/
static void __pyx_memoryviewslice___dealloc__(PyObject *__pyx_v_self) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0);
__pyx_memoryviewslice___pyx_pf_15View_dot_MemoryView_16_memoryviewslice___dealloc__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
}
static void __pyx_memoryviewslice___pyx_pf_15View_dot_MemoryView_16_memoryviewslice___dealloc__(struct __pyx_memoryviewslice_obj *__pyx_v_self) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__dealloc__", 0);
/* "View.MemoryView":973
*
* def __dealloc__(self):
* __PYX_XDEC_MEMVIEW(&self.from_slice, 1) # <<<<<<<<<<<<<<
*
* cdef convert_item_to_object(self, char *itemp):
*/
__PYX_XDEC_MEMVIEW((&__pyx_v_self->from_slice), 1);
/* "View.MemoryView":972
* cdef int (*to_dtype_func)(char *, object) except 0
*
* def __dealloc__(self): # <<<<<<<<<<<<<<
* __PYX_XDEC_MEMVIEW(&self.from_slice, 1)
*
*/
/* function exit code */
__Pyx_RefNannyFinishContext();
}
/* "View.MemoryView":975
* __PYX_XDEC_MEMVIEW(&self.from_slice, 1)
*
* cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<<
* if self.to_object_func != NULL:
* return self.to_object_func(itemp)
*/
static PyObject *__pyx_memoryviewslice_convert_item_to_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
__Pyx_RefNannySetupContext("convert_item_to_object", 0);
/* "View.MemoryView":976
*
* cdef convert_item_to_object(self, char *itemp):
* if self.to_object_func != NULL: # <<<<<<<<<<<<<<
* return self.to_object_func(itemp)
* else:
*/
__pyx_t_1 = ((__pyx_v_self->to_object_func != NULL) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":977
* cdef convert_item_to_object(self, char *itemp):
* if self.to_object_func != NULL:
* return self.to_object_func(itemp) # <<<<<<<<<<<<<<
* else:
* return memoryview.convert_item_to_object(self, itemp)
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_2 = __pyx_v_self->to_object_func(__pyx_v_itemp); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 977, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
/* "View.MemoryView":976
*
* cdef convert_item_to_object(self, char *itemp):
* if self.to_object_func != NULL: # <<<<<<<<<<<<<<
* return self.to_object_func(itemp)
* else:
*/
}
/* "View.MemoryView":979
* return self.to_object_func(itemp)
* else:
* return memoryview.convert_item_to_object(self, itemp) # <<<<<<<<<<<<<<
*
* cdef assign_item_from_object(self, char *itemp, object value):
*/
/*else*/ {
__Pyx_XDECREF(__pyx_r);
__pyx_t_2 = __pyx_memoryview_convert_item_to_object(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v_itemp); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 979, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
}
/* "View.MemoryView":975
* __PYX_XDEC_MEMVIEW(&self.from_slice, 1)
*
* cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<<
* if self.to_object_func != NULL:
* return self.to_object_func(itemp)
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_AddTraceback("View.MemoryView._memoryviewslice.convert_item_to_object", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
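/* convert_item_to_object and its counterpart below are plain function-pointer
 * dispatch: use the dtype-specialized converter registered at construction
 * time when there is one, otherwise fall back to the generic path. A
 * standalone sketch of the pattern (hypothetical names; the fallback here
 * simply reads a double, unlike the generic struct-format path above): */
#include <string.h>

typedef double (*demo_item_converter)(const char *itemp);

static double demo_generic_convert(const char *itemp)
{
    double v;
    memcpy(&v, itemp, sizeof v);   /* type-erased fallback read */
    return v;
}

static double demo_convert_item(const char *itemp, demo_item_converter specialized)
{
    /* Prefer the specialized converter whenever one was registered. */
    return specialized ? specialized(itemp) : demo_generic_convert(itemp);
}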
/* "View.MemoryView":981
* return memoryview.convert_item_to_object(self, itemp)
*
* cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<<
* if self.to_dtype_func != NULL:
* self.to_dtype_func(itemp, value)
*/
static PyObject *__pyx_memoryviewslice_assign_item_from_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
__Pyx_RefNannySetupContext("assign_item_from_object", 0);
/* "View.MemoryView":982
*
* cdef assign_item_from_object(self, char *itemp, object value):
* if self.to_dtype_func != NULL: # <<<<<<<<<<<<<<
* self.to_dtype_func(itemp, value)
* else:
*/
__pyx_t_1 = ((__pyx_v_self->to_dtype_func != NULL) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":983
* cdef assign_item_from_object(self, char *itemp, object value):
* if self.to_dtype_func != NULL:
* self.to_dtype_func(itemp, value) # <<<<<<<<<<<<<<
* else:
* memoryview.assign_item_from_object(self, itemp, value)
*/
__pyx_t_2 = __pyx_v_self->to_dtype_func(__pyx_v_itemp, __pyx_v_value); if (unlikely(__pyx_t_2 == ((int)0))) __PYX_ERR(1, 983, __pyx_L1_error)
/* "View.MemoryView":982
*
* cdef assign_item_from_object(self, char *itemp, object value):
* if self.to_dtype_func != NULL: # <<<<<<<<<<<<<<
* self.to_dtype_func(itemp, value)
* else:
*/
goto __pyx_L3;
}
/* "View.MemoryView":985
* self.to_dtype_func(itemp, value)
* else:
* memoryview.assign_item_from_object(self, itemp, value) # <<<<<<<<<<<<<<
*
* @property
*/
/*else*/ {
__pyx_t_3 = __pyx_memoryview_assign_item_from_object(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v_itemp, __pyx_v_value); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 985, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
}
__pyx_L3:;
/* "View.MemoryView":981
* return memoryview.convert_item_to_object(self, itemp)
*
* cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<<
* if self.to_dtype_func != NULL:
* self.to_dtype_func(itemp, value)
*/
/* function exit code */
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView._memoryviewslice.assign_item_from_object", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":988
*
* @property
* def base(self): # <<<<<<<<<<<<<<
* return self.from_object
*
*/
/* Python wrapper */
static PyObject *__pyx_pw_15View_dot_MemoryView_16_memoryviewslice_4base_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_16_memoryviewslice_4base_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_15View_dot_MemoryView_16_memoryviewslice_4base___get__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_15View_dot_MemoryView_16_memoryviewslice_4base___get__(struct __pyx_memoryviewslice_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":989
* @property
* def base(self):
* return self.from_object # <<<<<<<<<<<<<<
*
* __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)")
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(__pyx_v_self->from_object);
__pyx_r = __pyx_v_self->from_object;
goto __pyx_L0;
/* "View.MemoryView":988
*
* @property
* def base(self): # <<<<<<<<<<<<<<
* return self.from_object
*
*/
/* function exit code */
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "(tree fragment)":1
* def __reduce_cython__(self): # <<<<<<<<<<<<<<
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state):
*/
/* Python wrapper */
static PyObject *__pyx_pw___pyx_memoryviewslice_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
static PyObject *__pyx_pw___pyx_memoryviewslice_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
__pyx_r = __pyx_pf___pyx_memoryviewslice___reduce_cython__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf___pyx_memoryviewslice___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
__Pyx_RefNannySetupContext("__reduce_cython__", 0);
/* "(tree fragment)":2
* def __reduce_cython__(self):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<<
* def __setstate_cython__(self, __pyx_state):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
*/
__pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__17, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 2, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_Raise(__pyx_t_1, 0, 0, 0);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__PYX_ERR(1, 2, __pyx_L1_error)
/* "(tree fragment)":1
* def __reduce_cython__(self): # <<<<<<<<<<<<<<
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state):
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView._memoryviewslice.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "(tree fragment)":3
* def __reduce_cython__(self):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<<
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
*/
/* Python wrapper */
static PyObject *__pyx_pw___pyx_memoryviewslice_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/
static PyObject *__pyx_pw___pyx_memoryviewslice_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
__pyx_r = __pyx_pf___pyx_memoryviewslice_2__setstate_cython__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf___pyx_memoryviewslice_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
__Pyx_RefNannySetupContext("__setstate_cython__", 0);
/* "(tree fragment)":4
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<<
*/
__pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__18, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 4, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_Raise(__pyx_t_1, 0, 0, 0);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__PYX_ERR(1, 4, __pyx_L1_error)
/* "(tree fragment)":3
* def __reduce_cython__(self):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<<
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView._memoryviewslice.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":995
*
* @cname('__pyx_memoryview_fromslice')
* cdef memoryview_fromslice(__Pyx_memviewslice memviewslice, # <<<<<<<<<<<<<<
* int ndim,
* object (*to_object_func)(char *),
*/
static PyObject *__pyx_memoryview_fromslice(__Pyx_memviewslice __pyx_v_memviewslice, int __pyx_v_ndim, PyObject *(*__pyx_v_to_object_func)(char *), int (*__pyx_v_to_dtype_func)(char *, PyObject *), int __pyx_v_dtype_is_object) {
struct __pyx_memoryviewslice_obj *__pyx_v_result = 0;
Py_ssize_t __pyx_v_suboffset;
PyObject *__pyx_v_length = NULL;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
__Pyx_TypeInfo *__pyx_t_4;
Py_buffer __pyx_t_5;
Py_ssize_t *__pyx_t_6;
Py_ssize_t *__pyx_t_7;
Py_ssize_t *__pyx_t_8;
Py_ssize_t __pyx_t_9;
__Pyx_RefNannySetupContext("memoryview_fromslice", 0);
/* "View.MemoryView":1003
* cdef _memoryviewslice result
*
* if <PyObject *> memviewslice.memview == Py_None: # <<<<<<<<<<<<<<
* return None
*
*/
__pyx_t_1 = ((((PyObject *)__pyx_v_memviewslice.memview) == Py_None) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":1004
*
* if <PyObject *> memviewslice.memview == Py_None:
* return None # <<<<<<<<<<<<<<
*
*
*/
__Pyx_XDECREF(__pyx_r);
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
/* "View.MemoryView":1003
* cdef _memoryviewslice result
*
* if <PyObject *> memviewslice.memview == Py_None: # <<<<<<<<<<<<<<
* return None
*
*/
}
/* "View.MemoryView":1009
*
*
* result = _memoryviewslice(None, 0, dtype_is_object) # <<<<<<<<<<<<<<
*
* result.from_slice = memviewslice
*/
__pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_v_dtype_is_object); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1009, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1009, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_INCREF(Py_None);
__Pyx_GIVEREF(Py_None);
PyTuple_SET_ITEM(__pyx_t_3, 0, Py_None);
__Pyx_INCREF(__pyx_int_0);
__Pyx_GIVEREF(__pyx_int_0);
PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_int_0);
__Pyx_GIVEREF(__pyx_t_2);
PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2);
__pyx_t_2 = 0;
__pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryviewslice_type), __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1009, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_v_result = ((struct __pyx_memoryviewslice_obj *)__pyx_t_2);
__pyx_t_2 = 0;
/* "View.MemoryView":1011
* result = _memoryviewslice(None, 0, dtype_is_object)
*
* result.from_slice = memviewslice # <<<<<<<<<<<<<<
* __PYX_INC_MEMVIEW(&memviewslice, 1)
*
*/
__pyx_v_result->from_slice = __pyx_v_memviewslice;
/* "View.MemoryView":1012
*
* result.from_slice = memviewslice
* __PYX_INC_MEMVIEW(&memviewslice, 1) # <<<<<<<<<<<<<<
*
* result.from_object = (<memoryview> memviewslice.memview).base
*/
__PYX_INC_MEMVIEW((&__pyx_v_memviewslice), 1);
/* "View.MemoryView":1014
* __PYX_INC_MEMVIEW(&memviewslice, 1)
*
* result.from_object = (<memoryview> memviewslice.memview).base # <<<<<<<<<<<<<<
* result.typeinfo = memviewslice.memview.typeinfo
*
*/
__pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_memviewslice.memview), __pyx_n_s_base); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1014, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_GIVEREF(__pyx_t_2);
__Pyx_GOTREF(__pyx_v_result->from_object);
__Pyx_DECREF(__pyx_v_result->from_object);
__pyx_v_result->from_object = __pyx_t_2;
__pyx_t_2 = 0;
/* "View.MemoryView":1015
*
* result.from_object = (<memoryview> memviewslice.memview).base
* result.typeinfo = memviewslice.memview.typeinfo # <<<<<<<<<<<<<<
*
* result.view = memviewslice.memview.view
*/
__pyx_t_4 = __pyx_v_memviewslice.memview->typeinfo;
__pyx_v_result->__pyx_base.typeinfo = __pyx_t_4;
/* "View.MemoryView":1017
* result.typeinfo = memviewslice.memview.typeinfo
*
* result.view = memviewslice.memview.view # <<<<<<<<<<<<<<
* result.view.buf = <void *> memviewslice.data
* result.view.ndim = ndim
*/
__pyx_t_5 = __pyx_v_memviewslice.memview->view;
__pyx_v_result->__pyx_base.view = __pyx_t_5;
/* "View.MemoryView":1018
*
* result.view = memviewslice.memview.view
* result.view.buf = <void *> memviewslice.data # <<<<<<<<<<<<<<
* result.view.ndim = ndim
* (<__pyx_buffer *> &result.view).obj = Py_None
*/
__pyx_v_result->__pyx_base.view.buf = ((void *)__pyx_v_memviewslice.data);
/* "View.MemoryView":1019
* result.view = memviewslice.memview.view
* result.view.buf = <void *> memviewslice.data
* result.view.ndim = ndim # <<<<<<<<<<<<<<
* (<__pyx_buffer *> &result.view).obj = Py_None
* Py_INCREF(Py_None)
*/
__pyx_v_result->__pyx_base.view.ndim = __pyx_v_ndim;
/* "View.MemoryView":1020
* result.view.buf = <void *> memviewslice.data
* result.view.ndim = ndim
* (<__pyx_buffer *> &result.view).obj = Py_None # <<<<<<<<<<<<<<
* Py_INCREF(Py_None)
*
*/
((Py_buffer *)(&__pyx_v_result->__pyx_base.view))->obj = Py_None;
/* "View.MemoryView":1021
* result.view.ndim = ndim
* (<__pyx_buffer *> &result.view).obj = Py_None
* Py_INCREF(Py_None) # <<<<<<<<<<<<<<
*
* if (<memoryview>memviewslice.memview).flags & PyBUF_WRITABLE:
*/
Py_INCREF(Py_None);
/* "View.MemoryView":1023
* Py_INCREF(Py_None)
*
* if (<memoryview>memviewslice.memview).flags & PyBUF_WRITABLE: # <<<<<<<<<<<<<<
* result.flags = PyBUF_RECORDS
* else:
*/
__pyx_t_1 = ((((struct __pyx_memoryview_obj *)__pyx_v_memviewslice.memview)->flags & PyBUF_WRITABLE) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":1024
*
* if (<memoryview>memviewslice.memview).flags & PyBUF_WRITABLE:
* result.flags = PyBUF_RECORDS # <<<<<<<<<<<<<<
* else:
* result.flags = PyBUF_RECORDS_RO
*/
__pyx_v_result->__pyx_base.flags = PyBUF_RECORDS;
/* "View.MemoryView":1023
* Py_INCREF(Py_None)
*
* if (<memoryview>memviewslice.memview).flags & PyBUF_WRITABLE: # <<<<<<<<<<<<<<
* result.flags = PyBUF_RECORDS
* else:
*/
goto __pyx_L4;
}
/* "View.MemoryView":1026
* result.flags = PyBUF_RECORDS
* else:
* result.flags = PyBUF_RECORDS_RO # <<<<<<<<<<<<<<
*
* result.view.shape = <Py_ssize_t *> result.from_slice.shape
*/
/*else*/ {
__pyx_v_result->__pyx_base.flags = PyBUF_RECORDS_RO;
}
__pyx_L4:;
/* "View.MemoryView":1028
* result.flags = PyBUF_RECORDS_RO
*
* result.view.shape = <Py_ssize_t *> result.from_slice.shape # <<<<<<<<<<<<<<
* result.view.strides = <Py_ssize_t *> result.from_slice.strides
*
*/
__pyx_v_result->__pyx_base.view.shape = ((Py_ssize_t *)__pyx_v_result->from_slice.shape);
/* "View.MemoryView":1029
*
* result.view.shape = <Py_ssize_t *> result.from_slice.shape
* result.view.strides = <Py_ssize_t *> result.from_slice.strides # <<<<<<<<<<<<<<
*
*
*/
__pyx_v_result->__pyx_base.view.strides = ((Py_ssize_t *)__pyx_v_result->from_slice.strides);
/* "View.MemoryView":1032
*
*
* result.view.suboffsets = NULL # <<<<<<<<<<<<<<
* for suboffset in result.from_slice.suboffsets[:ndim]:
* if suboffset >= 0:
*/
__pyx_v_result->__pyx_base.view.suboffsets = NULL;
/* "View.MemoryView":1033
*
* result.view.suboffsets = NULL
* for suboffset in result.from_slice.suboffsets[:ndim]: # <<<<<<<<<<<<<<
* if suboffset >= 0:
* result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets
*/
__pyx_t_7 = (__pyx_v_result->from_slice.suboffsets + __pyx_v_ndim);
for (__pyx_t_8 = __pyx_v_result->from_slice.suboffsets; __pyx_t_8 < __pyx_t_7; __pyx_t_8++) {
__pyx_t_6 = __pyx_t_8;
__pyx_v_suboffset = (__pyx_t_6[0]);
/* "View.MemoryView":1034
* result.view.suboffsets = NULL
* for suboffset in result.from_slice.suboffsets[:ndim]:
* if suboffset >= 0: # <<<<<<<<<<<<<<
* result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets
* break
*/
__pyx_t_1 = ((__pyx_v_suboffset >= 0) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":1035
* for suboffset in result.from_slice.suboffsets[:ndim]:
* if suboffset >= 0:
* result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets # <<<<<<<<<<<<<<
* break
*
*/
__pyx_v_result->__pyx_base.view.suboffsets = ((Py_ssize_t *)__pyx_v_result->from_slice.suboffsets);
/* "View.MemoryView":1036
* if suboffset >= 0:
* result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets
* break # <<<<<<<<<<<<<<
*
* result.view.len = result.view.itemsize
*/
goto __pyx_L6_break;
/* "View.MemoryView":1034
* result.view.suboffsets = NULL
* for suboffset in result.from_slice.suboffsets[:ndim]:
* if suboffset >= 0: # <<<<<<<<<<<<<<
* result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets
* break
*/
}
}
__pyx_L6_break:;
/* "View.MemoryView":1038
* break
*
* result.view.len = result.view.itemsize # <<<<<<<<<<<<<<
* for length in result.view.shape[:ndim]:
* result.view.len *= length
*/
__pyx_t_9 = __pyx_v_result->__pyx_base.view.itemsize;
__pyx_v_result->__pyx_base.view.len = __pyx_t_9;
/* "View.MemoryView":1039
*
* result.view.len = result.view.itemsize
* for length in result.view.shape[:ndim]: # <<<<<<<<<<<<<<
* result.view.len *= length
*
*/
__pyx_t_7 = (__pyx_v_result->__pyx_base.view.shape + __pyx_v_ndim);
for (__pyx_t_8 = __pyx_v_result->__pyx_base.view.shape; __pyx_t_8 < __pyx_t_7; __pyx_t_8++) {
__pyx_t_6 = __pyx_t_8;
__pyx_t_2 = PyInt_FromSsize_t((__pyx_t_6[0])); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1039, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_XDECREF_SET(__pyx_v_length, __pyx_t_2);
__pyx_t_2 = 0;
/* "View.MemoryView":1040
* result.view.len = result.view.itemsize
* for length in result.view.shape[:ndim]:
* result.view.len *= length # <<<<<<<<<<<<<<
*
* result.to_object_func = to_object_func
*/
__pyx_t_2 = PyInt_FromSsize_t(__pyx_v_result->__pyx_base.view.len); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1040, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = PyNumber_InPlaceMultiply(__pyx_t_2, __pyx_v_length); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1040, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_t_9 = __Pyx_PyIndex_AsSsize_t(__pyx_t_3); if (unlikely((__pyx_t_9 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 1040, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_v_result->__pyx_base.view.len = __pyx_t_9;
}
/* "View.MemoryView":1042
* result.view.len *= length
*
* result.to_object_func = to_object_func # <<<<<<<<<<<<<<
* result.to_dtype_func = to_dtype_func
*
*/
__pyx_v_result->to_object_func = __pyx_v_to_object_func;
/* "View.MemoryView":1043
*
* result.to_object_func = to_object_func
* result.to_dtype_func = to_dtype_func # <<<<<<<<<<<<<<
*
* return result
*/
__pyx_v_result->to_dtype_func = __pyx_v_to_dtype_func;
/* "View.MemoryView":1045
* result.to_dtype_func = to_dtype_func
*
* return result # <<<<<<<<<<<<<<
*
* @cname('__pyx_memoryview_get_slice_from_memoryview')
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(((PyObject *)__pyx_v_result));
__pyx_r = ((PyObject *)__pyx_v_result);
goto __pyx_L0;
/* "View.MemoryView":995
*
* @cname('__pyx_memoryview_fromslice')
* cdef memoryview_fromslice(__Pyx_memviewslice memviewslice, # <<<<<<<<<<<<<<
* int ndim,
* object (*to_object_func)(char *),
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView.memoryview_fromslice", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XDECREF((PyObject *)__pyx_v_result);
__Pyx_XDECREF(__pyx_v_length);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
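/* One detail of memoryview_fromslice worth isolating: the new view only
 * exposes a suboffsets array when at least one dimension is actually
 * indirect; otherwise view.suboffsets stays NULL, which PEP 3118 defines as
 * "all dimensions direct". A standalone sketch of that scan (hypothetical
 * helper, not part of the generated module): */
#include <stddef.h>

static const ptrdiff_t *demo_effective_suboffsets(const ptrdiff_t *suboffsets,
                                                  int ndim)
{
    int i;
    for (i = 0; i < ndim; i++)
        if (suboffsets[i] >= 0)
            return suboffsets;   /* found an indirect dimension: keep the array */
    return NULL;                 /* fully direct view */
}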
/* "View.MemoryView":1048
*
* @cname('__pyx_memoryview_get_slice_from_memoryview')
* cdef __Pyx_memviewslice *get_slice_from_memview(memoryview memview, # <<<<<<<<<<<<<<
* __Pyx_memviewslice *mslice):
* cdef _memoryviewslice obj
*/
static __Pyx_memviewslice *__pyx_memoryview_get_slice_from_memoryview(struct __pyx_memoryview_obj *__pyx_v_memview, __Pyx_memviewslice *__pyx_v_mslice) {
struct __pyx_memoryviewslice_obj *__pyx_v_obj = 0;
__Pyx_memviewslice *__pyx_r;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
__Pyx_RefNannySetupContext("get_slice_from_memview", 0);
/* "View.MemoryView":1051
* __Pyx_memviewslice *mslice):
* cdef _memoryviewslice obj
* if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<<
* obj = memview
* return &obj.from_slice
*/
__pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type);
__pyx_t_2 = (__pyx_t_1 != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1052
* cdef _memoryviewslice obj
* if isinstance(memview, _memoryviewslice):
* obj = memview # <<<<<<<<<<<<<<
* return &obj.from_slice
* else:
*/
if (!(likely(((((PyObject *)__pyx_v_memview)) == Py_None) || likely(__Pyx_TypeTest(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type))))) __PYX_ERR(1, 1052, __pyx_L1_error)
__pyx_t_3 = ((PyObject *)__pyx_v_memview);
__Pyx_INCREF(__pyx_t_3);
__pyx_v_obj = ((struct __pyx_memoryviewslice_obj *)__pyx_t_3);
__pyx_t_3 = 0;
/* "View.MemoryView":1053
* if isinstance(memview, _memoryviewslice):
* obj = memview
* return &obj.from_slice # <<<<<<<<<<<<<<
* else:
* slice_copy(memview, mslice)
*/
__pyx_r = (&__pyx_v_obj->from_slice);
goto __pyx_L0;
/* "View.MemoryView":1051
* __Pyx_memviewslice *mslice):
* cdef _memoryviewslice obj
* if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<<
* obj = memview
* return &obj.from_slice
*/
}
/* "View.MemoryView":1055
* return &obj.from_slice
* else:
* slice_copy(memview, mslice) # <<<<<<<<<<<<<<
* return mslice
*
*/
/*else*/ {
__pyx_memoryview_slice_copy(__pyx_v_memview, __pyx_v_mslice);
/* "View.MemoryView":1056
* else:
* slice_copy(memview, mslice)
* return mslice # <<<<<<<<<<<<<<
*
* @cname('__pyx_memoryview_slice_copy')
*/
__pyx_r = __pyx_v_mslice;
goto __pyx_L0;
}
/* "View.MemoryView":1048
*
* @cname('__pyx_memoryview_get_slice_from_memoryview')
* cdef __Pyx_memviewslice *get_slice_from_memview(memoryview memview, # <<<<<<<<<<<<<<
* __Pyx_memviewslice *mslice):
* cdef _memoryviewslice obj
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_WriteUnraisable("View.MemoryView.get_slice_from_memview", __pyx_clineno, __pyx_lineno, __pyx_filename, 1, 0);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XDECREF((PyObject *)__pyx_v_obj);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
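/* get_slice_from_memview avoids a copy when it can: if the memoryview object
 * already owns an embedded slice struct, a pointer into the object is
 * returned; only otherwise is the caller's stack scratch filled in. A
 * standalone sketch of the borrow-or-fill pattern (hypothetical types, not
 * the real memoryview structs): */
#include <stddef.h>

typedef struct { ptrdiff_t value; } demo_meta;

typedef struct {
    int       has_embedded;     /* analogue of isinstance(memview, _memoryviewslice) */
    demo_meta embedded;         /* analogue of obj.from_slice */
    ptrdiff_t source;           /* data the scratch path is built from */
} demo_view;

static demo_meta *demo_get_meta(demo_view *v, demo_meta *scratch)
{
    if (v->has_embedded)
        return &v->embedded;    /* borrow: no copy, lifetime tied to the object */
    scratch->value = v->source; /* materialize into caller-provided storage */
    return scratch;
}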
/* "View.MemoryView":1059
*
* @cname('__pyx_memoryview_slice_copy')
* cdef void slice_copy(memoryview memview, __Pyx_memviewslice *dst): # <<<<<<<<<<<<<<
* cdef int dim
* cdef (Py_ssize_t*) shape, strides, suboffsets
*/
static void __pyx_memoryview_slice_copy(struct __pyx_memoryview_obj *__pyx_v_memview, __Pyx_memviewslice *__pyx_v_dst) {
int __pyx_v_dim;
Py_ssize_t *__pyx_v_shape;
Py_ssize_t *__pyx_v_strides;
Py_ssize_t *__pyx_v_suboffsets;
__Pyx_RefNannyDeclarations
Py_ssize_t *__pyx_t_1;
int __pyx_t_2;
int __pyx_t_3;
int __pyx_t_4;
Py_ssize_t __pyx_t_5;
__Pyx_RefNannySetupContext("slice_copy", 0);
/* "View.MemoryView":1063
* cdef (Py_ssize_t*) shape, strides, suboffsets
*
* shape = memview.view.shape # <<<<<<<<<<<<<<
* strides = memview.view.strides
* suboffsets = memview.view.suboffsets
*/
__pyx_t_1 = __pyx_v_memview->view.shape;
__pyx_v_shape = __pyx_t_1;
/* "View.MemoryView":1064
*
* shape = memview.view.shape
* strides = memview.view.strides # <<<<<<<<<<<<<<
* suboffsets = memview.view.suboffsets
*
*/
__pyx_t_1 = __pyx_v_memview->view.strides;
__pyx_v_strides = __pyx_t_1;
/* "View.MemoryView":1065
* shape = memview.view.shape
* strides = memview.view.strides
* suboffsets = memview.view.suboffsets # <<<<<<<<<<<<<<
*
* dst.memview = <__pyx_memoryview *> memview
*/
__pyx_t_1 = __pyx_v_memview->view.suboffsets;
__pyx_v_suboffsets = __pyx_t_1;
/* "View.MemoryView":1067
* suboffsets = memview.view.suboffsets
*
* dst.memview = <__pyx_memoryview *> memview # <<<<<<<<<<<<<<
* dst.data = <char *> memview.view.buf
*
*/
__pyx_v_dst->memview = ((struct __pyx_memoryview_obj *)__pyx_v_memview);
/* "View.MemoryView":1068
*
* dst.memview = <__pyx_memoryview *> memview
* dst.data = <char *> memview.view.buf # <<<<<<<<<<<<<<
*
* for dim in range(memview.view.ndim):
*/
__pyx_v_dst->data = ((char *)__pyx_v_memview->view.buf);
/* "View.MemoryView":1070
* dst.data = <char *> memview.view.buf
*
* for dim in range(memview.view.ndim): # <<<<<<<<<<<<<<
* dst.shape[dim] = shape[dim]
* dst.strides[dim] = strides[dim]
*/
__pyx_t_2 = __pyx_v_memview->view.ndim;
__pyx_t_3 = __pyx_t_2;
for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) {
__pyx_v_dim = __pyx_t_4;
/* "View.MemoryView":1071
*
* for dim in range(memview.view.ndim):
* dst.shape[dim] = shape[dim] # <<<<<<<<<<<<<<
* dst.strides[dim] = strides[dim]
* dst.suboffsets[dim] = suboffsets[dim] if suboffsets else -1
*/
(__pyx_v_dst->shape[__pyx_v_dim]) = (__pyx_v_shape[__pyx_v_dim]);
/* "View.MemoryView":1072
* for dim in range(memview.view.ndim):
* dst.shape[dim] = shape[dim]
* dst.strides[dim] = strides[dim] # <<<<<<<<<<<<<<
* dst.suboffsets[dim] = suboffsets[dim] if suboffsets else -1
*
*/
(__pyx_v_dst->strides[__pyx_v_dim]) = (__pyx_v_strides[__pyx_v_dim]);
/* "View.MemoryView":1073
* dst.shape[dim] = shape[dim]
* dst.strides[dim] = strides[dim]
* dst.suboffsets[dim] = suboffsets[dim] if suboffsets else -1 # <<<<<<<<<<<<<<
*
* @cname('__pyx_memoryview_copy_object')
*/
if ((__pyx_v_suboffsets != 0)) {
__pyx_t_5 = (__pyx_v_suboffsets[__pyx_v_dim]);
} else {
__pyx_t_5 = -1L;
}
(__pyx_v_dst->suboffsets[__pyx_v_dim]) = __pyx_t_5;
}
/* "View.MemoryView":1059
*
* @cname('__pyx_memoryview_slice_copy')
* cdef void slice_copy(memoryview memview, __Pyx_memviewslice *dst): # <<<<<<<<<<<<<<
* cdef int dim
* cdef (Py_ssize_t*) shape, strides, suboffsets
*/
/* function exit code */
__Pyx_RefNannyFinishContext();
}
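/* slice_copy flattens the Py_buffer's shape/strides/suboffsets pointers into
 * the slice struct's fixed-size arrays, substituting -1 ("direct") for every
 * dimension when the buffer exports no suboffsets at all. A standalone sketch
 * with a hypothetical slice type (not the real __Pyx_memviewslice): */
#include <stddef.h>

#define DEMO_MAX_DIMS 8

typedef struct {
    char     *data;
    ptrdiff_t shape[DEMO_MAX_DIMS];
    ptrdiff_t strides[DEMO_MAX_DIMS];
    ptrdiff_t suboffsets[DEMO_MAX_DIMS];
} demo_slice;

static void demo_slice_copy(demo_slice *dst, char *buf, int ndim,
                            const ptrdiff_t *shape, const ptrdiff_t *strides,
                            const ptrdiff_t *suboffsets)   /* may be NULL */
{
    int dim;
    dst->data = buf;
    for (dim = 0; dim < ndim; dim++) {
        dst->shape[dim]      = shape[dim];
        dst->strides[dim]    = strides[dim];
        dst->suboffsets[dim] = suboffsets ? suboffsets[dim] : -1;
    }
}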
/* "View.MemoryView":1076
*
* @cname('__pyx_memoryview_copy_object')
* cdef memoryview_copy(memoryview memview): # <<<<<<<<<<<<<<
* "Create a new memoryview object"
* cdef __Pyx_memviewslice memviewslice
*/
static PyObject *__pyx_memoryview_copy_object(struct __pyx_memoryview_obj *__pyx_v_memview) {
__Pyx_memviewslice __pyx_v_memviewslice;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
__Pyx_RefNannySetupContext("memoryview_copy", 0);
/* "View.MemoryView":1079
* "Create a new memoryview object"
* cdef __Pyx_memviewslice memviewslice
* slice_copy(memview, &memviewslice) # <<<<<<<<<<<<<<
* return memoryview_copy_from_slice(memview, &memviewslice)
*
*/
__pyx_memoryview_slice_copy(__pyx_v_memview, (&__pyx_v_memviewslice));
/* "View.MemoryView":1080
* cdef __Pyx_memviewslice memviewslice
* slice_copy(memview, &memviewslice)
* return memoryview_copy_from_slice(memview, &memviewslice) # <<<<<<<<<<<<<<
*
* @cname('__pyx_memoryview_copy_object_from_slice')
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = __pyx_memoryview_copy_object_from_slice(__pyx_v_memview, (&__pyx_v_memviewslice)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1080, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "View.MemoryView":1076
*
* @cname('__pyx_memoryview_copy_object')
* cdef memoryview_copy(memoryview memview): # <<<<<<<<<<<<<<
* "Create a new memoryview object"
* cdef __Pyx_memviewslice memviewslice
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.memoryview_copy", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":1083
*
* @cname('__pyx_memoryview_copy_object_from_slice')
* cdef memoryview_copy_from_slice(memoryview memview, __Pyx_memviewslice *memviewslice): # <<<<<<<<<<<<<<
* """
* Create a new memoryview object from a given memoryview object and slice.
*/
static PyObject *__pyx_memoryview_copy_object_from_slice(struct __pyx_memoryview_obj *__pyx_v_memview, __Pyx_memviewslice *__pyx_v_memviewslice) {
PyObject *(*__pyx_v_to_object_func)(char *);
int (*__pyx_v_to_dtype_func)(char *, PyObject *);
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
PyObject *(*__pyx_t_3)(char *);
int (*__pyx_t_4)(char *, PyObject *);
PyObject *__pyx_t_5 = NULL;
__Pyx_RefNannySetupContext("memoryview_copy_from_slice", 0);
/* "View.MemoryView":1090
* cdef int (*to_dtype_func)(char *, object) except 0
*
* if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<<
* to_object_func = (<_memoryviewslice> memview).to_object_func
* to_dtype_func = (<_memoryviewslice> memview).to_dtype_func
*/
__pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type);
__pyx_t_2 = (__pyx_t_1 != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1091
*
* if isinstance(memview, _memoryviewslice):
* to_object_func = (<_memoryviewslice> memview).to_object_func # <<<<<<<<<<<<<<
* to_dtype_func = (<_memoryviewslice> memview).to_dtype_func
* else:
*/
__pyx_t_3 = ((struct __pyx_memoryviewslice_obj *)__pyx_v_memview)->to_object_func;
__pyx_v_to_object_func = __pyx_t_3;
/* "View.MemoryView":1092
* if isinstance(memview, _memoryviewslice):
* to_object_func = (<_memoryviewslice> memview).to_object_func
* to_dtype_func = (<_memoryviewslice> memview).to_dtype_func # <<<<<<<<<<<<<<
* else:
* to_object_func = NULL
*/
__pyx_t_4 = ((struct __pyx_memoryviewslice_obj *)__pyx_v_memview)->to_dtype_func;
__pyx_v_to_dtype_func = __pyx_t_4;
/* "View.MemoryView":1090
* cdef int (*to_dtype_func)(char *, object) except 0
*
* if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<<
* to_object_func = (<_memoryviewslice> memview).to_object_func
* to_dtype_func = (<_memoryviewslice> memview).to_dtype_func
*/
goto __pyx_L3;
}
/* "View.MemoryView":1094
* to_dtype_func = (<_memoryviewslice> memview).to_dtype_func
* else:
* to_object_func = NULL # <<<<<<<<<<<<<<
* to_dtype_func = NULL
*
*/
/*else*/ {
__pyx_v_to_object_func = NULL;
/* "View.MemoryView":1095
* else:
* to_object_func = NULL
* to_dtype_func = NULL # <<<<<<<<<<<<<<
*
* return memoryview_fromslice(memviewslice[0], memview.view.ndim,
*/
__pyx_v_to_dtype_func = NULL;
}
__pyx_L3:;
/* "View.MemoryView":1097
* to_dtype_func = NULL
*
* return memoryview_fromslice(memviewslice[0], memview.view.ndim, # <<<<<<<<<<<<<<
* to_object_func, to_dtype_func,
* memview.dtype_is_object)
*/
__Pyx_XDECREF(__pyx_r);
/* "View.MemoryView":1099
* return memoryview_fromslice(memviewslice[0], memview.view.ndim,
* to_object_func, to_dtype_func,
* memview.dtype_is_object) # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_5 = __pyx_memoryview_fromslice((__pyx_v_memviewslice[0]), __pyx_v_memview->view.ndim, __pyx_v_to_object_func, __pyx_v_to_dtype_func, __pyx_v_memview->dtype_is_object); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 1097, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__pyx_r = __pyx_t_5;
__pyx_t_5 = 0;
goto __pyx_L0;
/* "View.MemoryView":1083
*
* @cname('__pyx_memoryview_copy_object_from_slice')
* cdef memoryview_copy_from_slice(memoryview memview, __Pyx_memviewslice *memviewslice): # <<<<<<<<<<<<<<
* """
* Create a new memoryview object from a given memoryview object and slice.
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_5);
__Pyx_AddTraceback("View.MemoryView.memoryview_copy_from_slice", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":1105
*
*
* cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: # <<<<<<<<<<<<<<
* if arg < 0:
* return -arg
*/
static Py_ssize_t abs_py_ssize_t(Py_ssize_t __pyx_v_arg) {
Py_ssize_t __pyx_r;
int __pyx_t_1;
/* "View.MemoryView":1106
*
* cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil:
* if arg < 0: # <<<<<<<<<<<<<<
* return -arg
* else:
*/
__pyx_t_1 = ((__pyx_v_arg < 0) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":1107
* cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil:
* if arg < 0:
* return -arg # <<<<<<<<<<<<<<
* else:
* return arg
*/
__pyx_r = (-__pyx_v_arg);
goto __pyx_L0;
/* "View.MemoryView":1106
*
* cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil:
* if arg < 0: # <<<<<<<<<<<<<<
* return -arg
* else:
*/
}
/* "View.MemoryView":1109
* return -arg
* else:
* return arg # <<<<<<<<<<<<<<
*
* @cname('__pyx_get_best_slice_order')
*/
/*else*/ {
__pyx_r = __pyx_v_arg;
goto __pyx_L0;
}
/* "View.MemoryView":1105
*
*
* cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: # <<<<<<<<<<<<<<
* if arg < 0:
* return -arg
*/
/* function exit code */
__pyx_L0:;
return __pyx_r;
}
/* "View.MemoryView":1112
*
* @cname('__pyx_get_best_slice_order')
* cdef char get_best_order(__Pyx_memviewslice *mslice, int ndim) nogil: # <<<<<<<<<<<<<<
* """
* Figure out the best memory access order for a given slice.
*/
static char __pyx_get_best_slice_order(__Pyx_memviewslice *__pyx_v_mslice, int __pyx_v_ndim) {
int __pyx_v_i;
Py_ssize_t __pyx_v_c_stride;
Py_ssize_t __pyx_v_f_stride;
char __pyx_r;
int __pyx_t_1;
int __pyx_t_2;
int __pyx_t_3;
int __pyx_t_4;
/* "View.MemoryView":1117
* """
* cdef int i
* cdef Py_ssize_t c_stride = 0 # <<<<<<<<<<<<<<
* cdef Py_ssize_t f_stride = 0
*
*/
__pyx_v_c_stride = 0;
/* "View.MemoryView":1118
* cdef int i
* cdef Py_ssize_t c_stride = 0
* cdef Py_ssize_t f_stride = 0 # <<<<<<<<<<<<<<
*
* for i in range(ndim - 1, -1, -1):
*/
__pyx_v_f_stride = 0;
/* "View.MemoryView":1120
* cdef Py_ssize_t f_stride = 0
*
* for i in range(ndim - 1, -1, -1): # <<<<<<<<<<<<<<
* if mslice.shape[i] > 1:
* c_stride = mslice.strides[i]
*/
for (__pyx_t_1 = (__pyx_v_ndim - 1); __pyx_t_1 > -1; __pyx_t_1-=1) {
__pyx_v_i = __pyx_t_1;
/* "View.MemoryView":1121
*
* for i in range(ndim - 1, -1, -1):
* if mslice.shape[i] > 1: # <<<<<<<<<<<<<<
* c_stride = mslice.strides[i]
* break
*/
__pyx_t_2 = (((__pyx_v_mslice->shape[__pyx_v_i]) > 1) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1122
* for i in range(ndim - 1, -1, -1):
* if mslice.shape[i] > 1:
* c_stride = mslice.strides[i] # <<<<<<<<<<<<<<
* break
*
*/
__pyx_v_c_stride = (__pyx_v_mslice->strides[__pyx_v_i]);
/* "View.MemoryView":1123
* if mslice.shape[i] > 1:
* c_stride = mslice.strides[i]
* break # <<<<<<<<<<<<<<
*
* for i in range(ndim):
*/
goto __pyx_L4_break;
/* "View.MemoryView":1121
*
* for i in range(ndim - 1, -1, -1):
* if mslice.shape[i] > 1: # <<<<<<<<<<<<<<
* c_stride = mslice.strides[i]
* break
*/
}
}
__pyx_L4_break:;
/* "View.MemoryView":1125
* break
*
* for i in range(ndim): # <<<<<<<<<<<<<<
* if mslice.shape[i] > 1:
* f_stride = mslice.strides[i]
*/
__pyx_t_1 = __pyx_v_ndim;
__pyx_t_3 = __pyx_t_1;
for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) {
__pyx_v_i = __pyx_t_4;
/* "View.MemoryView":1126
*
* for i in range(ndim):
* if mslice.shape[i] > 1: # <<<<<<<<<<<<<<
* f_stride = mslice.strides[i]
* break
*/
__pyx_t_2 = (((__pyx_v_mslice->shape[__pyx_v_i]) > 1) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1127
* for i in range(ndim):
* if mslice.shape[i] > 1:
* f_stride = mslice.strides[i] # <<<<<<<<<<<<<<
* break
*
*/
__pyx_v_f_stride = (__pyx_v_mslice->strides[__pyx_v_i]);
/* "View.MemoryView":1128
* if mslice.shape[i] > 1:
* f_stride = mslice.strides[i]
* break # <<<<<<<<<<<<<<
*
* if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride):
*/
goto __pyx_L7_break;
/* "View.MemoryView":1126
*
* for i in range(ndim):
* if mslice.shape[i] > 1: # <<<<<<<<<<<<<<
* f_stride = mslice.strides[i]
* break
*/
}
}
__pyx_L7_break:;
/* "View.MemoryView":1130
* break
*
* if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride): # <<<<<<<<<<<<<<
* return 'C'
* else:
*/
__pyx_t_2 = ((abs_py_ssize_t(__pyx_v_c_stride) <= abs_py_ssize_t(__pyx_v_f_stride)) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1131
*
* if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride):
* return 'C' # <<<<<<<<<<<<<<
* else:
* return 'F'
*/
__pyx_r = 'C';
goto __pyx_L0;
/* "View.MemoryView":1130
* break
*
* if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride): # <<<<<<<<<<<<<<
* return 'C'
* else:
*/
}
/* "View.MemoryView":1133
* return 'C'
* else:
* return 'F' # <<<<<<<<<<<<<<
*
* @cython.cdivision(True)
*/
/*else*/ {
__pyx_r = 'F';
goto __pyx_L0;
}
/* "View.MemoryView":1112
*
* @cname('__pyx_get_best_slice_order')
* cdef char get_best_order(__Pyx_memviewslice *mslice, int ndim) nogil: # <<<<<<<<<<<<<<
* """
* Figure out the best memory access order for a given slice.
*/
/* function exit code */
__pyx_L0:;
return __pyx_r;
}
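/* The heuristic above compares the stride of the last non-degenerate axis
 * (what a C-contiguous layout would use as its inner loop) against that of
 * the first (the Fortran inner loop); the smaller absolute stride gives the
 * more cache-friendly traversal, with ties going to 'C'. A standalone sketch
 * (hypothetical helper, using long in place of Py_ssize_t): */
#include <stdlib.h>

static char demo_best_order(const long *shape, const long *strides, int ndim)
{
    long c_stride = 0, f_stride = 0;
    int i;
    for (i = ndim - 1; i >= 0; i--)        /* innermost axis under C order */
        if (shape[i] > 1) { c_stride = strides[i]; break; }
    for (i = 0; i < ndim; i++)             /* innermost axis under F order */
        if (shape[i] > 1) { f_stride = strides[i]; break; }
    return labs(c_stride) <= labs(f_stride) ? 'C' : 'F';
}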
/* "View.MemoryView":1136
*
* @cython.cdivision(True)
* cdef void _copy_strided_to_strided(char *src_data, Py_ssize_t *src_strides, # <<<<<<<<<<<<<<
* char *dst_data, Py_ssize_t *dst_strides,
* Py_ssize_t *src_shape, Py_ssize_t *dst_shape,
*/
static void _copy_strided_to_strided(char *__pyx_v_src_data, Py_ssize_t *__pyx_v_src_strides, char *__pyx_v_dst_data, Py_ssize_t *__pyx_v_dst_strides, Py_ssize_t *__pyx_v_src_shape, Py_ssize_t *__pyx_v_dst_shape, int __pyx_v_ndim, size_t __pyx_v_itemsize) {
CYTHON_UNUSED Py_ssize_t __pyx_v_i;
CYTHON_UNUSED Py_ssize_t __pyx_v_src_extent;
Py_ssize_t __pyx_v_dst_extent;
Py_ssize_t __pyx_v_src_stride;
Py_ssize_t __pyx_v_dst_stride;
int __pyx_t_1;
int __pyx_t_2;
int __pyx_t_3;
Py_ssize_t __pyx_t_4;
Py_ssize_t __pyx_t_5;
Py_ssize_t __pyx_t_6;
/* "View.MemoryView":1143
*
* cdef Py_ssize_t i
* cdef Py_ssize_t src_extent = src_shape[0] # <<<<<<<<<<<<<<
* cdef Py_ssize_t dst_extent = dst_shape[0]
* cdef Py_ssize_t src_stride = src_strides[0]
*/
__pyx_v_src_extent = (__pyx_v_src_shape[0]);
/* "View.MemoryView":1144
* cdef Py_ssize_t i
* cdef Py_ssize_t src_extent = src_shape[0]
* cdef Py_ssize_t dst_extent = dst_shape[0] # <<<<<<<<<<<<<<
* cdef Py_ssize_t src_stride = src_strides[0]
* cdef Py_ssize_t dst_stride = dst_strides[0]
*/
__pyx_v_dst_extent = (__pyx_v_dst_shape[0]);
/* "View.MemoryView":1145
* cdef Py_ssize_t src_extent = src_shape[0]
* cdef Py_ssize_t dst_extent = dst_shape[0]
* cdef Py_ssize_t src_stride = src_strides[0] # <<<<<<<<<<<<<<
* cdef Py_ssize_t dst_stride = dst_strides[0]
*
*/
__pyx_v_src_stride = (__pyx_v_src_strides[0]);
/* "View.MemoryView":1146
* cdef Py_ssize_t dst_extent = dst_shape[0]
* cdef Py_ssize_t src_stride = src_strides[0]
* cdef Py_ssize_t dst_stride = dst_strides[0] # <<<<<<<<<<<<<<
*
* if ndim == 1:
*/
__pyx_v_dst_stride = (__pyx_v_dst_strides[0]);
/* "View.MemoryView":1148
* cdef Py_ssize_t dst_stride = dst_strides[0]
*
* if ndim == 1: # <<<<<<<<<<<<<<
* if (src_stride > 0 and dst_stride > 0 and
* <size_t> src_stride == itemsize == <size_t> dst_stride):
*/
__pyx_t_1 = ((__pyx_v_ndim == 1) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":1149
*
* if ndim == 1:
* if (src_stride > 0 and dst_stride > 0 and # <<<<<<<<<<<<<<
* <size_t> src_stride == itemsize == <size_t> dst_stride):
* memcpy(dst_data, src_data, itemsize * dst_extent)
*/
__pyx_t_2 = ((__pyx_v_src_stride > 0) != 0);
if (__pyx_t_2) {
} else {
__pyx_t_1 = __pyx_t_2;
goto __pyx_L5_bool_binop_done;
}
__pyx_t_2 = ((__pyx_v_dst_stride > 0) != 0);
if (__pyx_t_2) {
} else {
__pyx_t_1 = __pyx_t_2;
goto __pyx_L5_bool_binop_done;
}
/* "View.MemoryView":1150
* if ndim == 1:
* if (src_stride > 0 and dst_stride > 0 and
* <size_t> src_stride == itemsize == <size_t> dst_stride): # <<<<<<<<<<<<<<
* memcpy(dst_data, src_data, itemsize * dst_extent)
* else:
*/
__pyx_t_2 = (((size_t)__pyx_v_src_stride) == __pyx_v_itemsize);
if (__pyx_t_2) {
__pyx_t_2 = (__pyx_v_itemsize == ((size_t)__pyx_v_dst_stride));
}
__pyx_t_3 = (__pyx_t_2 != 0);
__pyx_t_1 = __pyx_t_3;
__pyx_L5_bool_binop_done:;
/* "View.MemoryView":1149
*
* if ndim == 1:
* if (src_stride > 0 and dst_stride > 0 and # <<<<<<<<<<<<<<
* <size_t> src_stride == itemsize == <size_t> dst_stride):
* memcpy(dst_data, src_data, itemsize * dst_extent)
*/
if (__pyx_t_1) {
/* "View.MemoryView":1151
* if (src_stride > 0 and dst_stride > 0 and
* <size_t> src_stride == itemsize == <size_t> dst_stride):
* memcpy(dst_data, src_data, itemsize * dst_extent) # <<<<<<<<<<<<<<
* else:
* for i in range(dst_extent):
*/
(void)(memcpy(__pyx_v_dst_data, __pyx_v_src_data, (__pyx_v_itemsize * __pyx_v_dst_extent)));
/* "View.MemoryView":1149
*
* if ndim == 1:
* if (src_stride > 0 and dst_stride > 0 and # <<<<<<<<<<<<<<
* <size_t> src_stride == itemsize == <size_t> dst_stride):
* memcpy(dst_data, src_data, itemsize * dst_extent)
*/
goto __pyx_L4;
}
/* "View.MemoryView":1153
* memcpy(dst_data, src_data, itemsize * dst_extent)
* else:
* for i in range(dst_extent): # <<<<<<<<<<<<<<
* memcpy(dst_data, src_data, itemsize)
* src_data += src_stride
*/
/*else*/ {
__pyx_t_4 = __pyx_v_dst_extent;
__pyx_t_5 = __pyx_t_4;
for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) {
__pyx_v_i = __pyx_t_6;
/* "View.MemoryView":1154
* else:
* for i in range(dst_extent):
* memcpy(dst_data, src_data, itemsize) # <<<<<<<<<<<<<<
* src_data += src_stride
* dst_data += dst_stride
*/
(void)(memcpy(__pyx_v_dst_data, __pyx_v_src_data, __pyx_v_itemsize));
/* "View.MemoryView":1155
* for i in range(dst_extent):
* memcpy(dst_data, src_data, itemsize)
* src_data += src_stride # <<<<<<<<<<<<<<
* dst_data += dst_stride
* else:
*/
__pyx_v_src_data = (__pyx_v_src_data + __pyx_v_src_stride);
/* "View.MemoryView":1156
* memcpy(dst_data, src_data, itemsize)
* src_data += src_stride
* dst_data += dst_stride # <<<<<<<<<<<<<<
* else:
* for i in range(dst_extent):
*/
__pyx_v_dst_data = (__pyx_v_dst_data + __pyx_v_dst_stride);
}
}
__pyx_L4:;
/* "View.MemoryView":1148
* cdef Py_ssize_t dst_stride = dst_strides[0]
*
* if ndim == 1: # <<<<<<<<<<<<<<
* if (src_stride > 0 and dst_stride > 0 and
* <size_t> src_stride == itemsize == <size_t> dst_stride):
*/
goto __pyx_L3;
}
/* "View.MemoryView":1158
* dst_data += dst_stride
* else:
* for i in range(dst_extent): # <<<<<<<<<<<<<<
* _copy_strided_to_strided(src_data, src_strides + 1,
* dst_data, dst_strides + 1,
*/
/*else*/ {
__pyx_t_4 = __pyx_v_dst_extent;
__pyx_t_5 = __pyx_t_4;
for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) {
__pyx_v_i = __pyx_t_6;
/* "View.MemoryView":1159
* else:
* for i in range(dst_extent):
* _copy_strided_to_strided(src_data, src_strides + 1, # <<<<<<<<<<<<<<
* dst_data, dst_strides + 1,
* src_shape + 1, dst_shape + 1,
*/
_copy_strided_to_strided(__pyx_v_src_data, (__pyx_v_src_strides + 1), __pyx_v_dst_data, (__pyx_v_dst_strides + 1), (__pyx_v_src_shape + 1), (__pyx_v_dst_shape + 1), (__pyx_v_ndim - 1), __pyx_v_itemsize);
/* "View.MemoryView":1163
* src_shape + 1, dst_shape + 1,
* ndim - 1, itemsize)
* src_data += src_stride # <<<<<<<<<<<<<<
* dst_data += dst_stride
*
*/
__pyx_v_src_data = (__pyx_v_src_data + __pyx_v_src_stride);
/* "View.MemoryView":1164
* ndim - 1, itemsize)
* src_data += src_stride
* dst_data += dst_stride # <<<<<<<<<<<<<<
*
* cdef void copy_strided_to_strided(__Pyx_memviewslice *src,
*/
__pyx_v_dst_data = (__pyx_v_dst_data + __pyx_v_dst_stride);
}
}
__pyx_L3:;
/* "View.MemoryView":1136
*
* @cython.cdivision(True)
* cdef void _copy_strided_to_strided(char *src_data, Py_ssize_t *src_strides, # <<<<<<<<<<<<<<
* char *dst_data, Py_ssize_t *dst_strides,
* Py_ssize_t *src_shape, Py_ssize_t *dst_shape,
*/
/* function exit code */
}
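/* The recursion above peels one dimension per level and, in the 1-D case,
 * collapses to a single memcpy whenever both sides are positively strided and
 * exactly item-contiguous. A compact standalone sketch of the same scheme
 * (hypothetical helper; direct buffers only, and it simplifies by assuming
 * src and dst share one shape): */
#include <stddef.h>
#include <string.h>

static void demo_strided_copy(char *src, const ptrdiff_t *sstr,
                              char *dst, const ptrdiff_t *dstr,
                              const ptrdiff_t *shape, int ndim, size_t itemsize)
{
    ptrdiff_t i;
    if (ndim == 1 && sstr[0] > 0 && dstr[0] > 0 &&
        (size_t)sstr[0] == itemsize && (size_t)dstr[0] == itemsize) {
        memcpy(dst, src, itemsize * (size_t)shape[0]);   /* contiguous fast path */
        return;
    }
    for (i = 0; i < shape[0]; i++) {
        if (ndim == 1)
            memcpy(dst, src, itemsize);                  /* one item at a time */
        else
            demo_strided_copy(src, sstr + 1, dst, dstr + 1,
                              shape + 1, ndim - 1, itemsize);
        src += sstr[0];
        dst += dstr[0];
    }
}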
/* "View.MemoryView":1166
* dst_data += dst_stride
*
* cdef void copy_strided_to_strided(__Pyx_memviewslice *src, # <<<<<<<<<<<<<<
* __Pyx_memviewslice *dst,
* int ndim, size_t itemsize) nogil:
*/
static void copy_strided_to_strided(__Pyx_memviewslice *__pyx_v_src, __Pyx_memviewslice *__pyx_v_dst, int __pyx_v_ndim, size_t __pyx_v_itemsize) {
/* "View.MemoryView":1169
* __Pyx_memviewslice *dst,
* int ndim, size_t itemsize) nogil:
* _copy_strided_to_strided(src.data, src.strides, dst.data, dst.strides, # <<<<<<<<<<<<<<
* src.shape, dst.shape, ndim, itemsize)
*
*/
_copy_strided_to_strided(__pyx_v_src->data, __pyx_v_src->strides, __pyx_v_dst->data, __pyx_v_dst->strides, __pyx_v_src->shape, __pyx_v_dst->shape, __pyx_v_ndim, __pyx_v_itemsize);
/* "View.MemoryView":1166
* dst_data += dst_stride
*
* cdef void copy_strided_to_strided(__Pyx_memviewslice *src, # <<<<<<<<<<<<<<
* __Pyx_memviewslice *dst,
* int ndim, size_t itemsize) nogil:
*/
/* function exit code */
}
/* "View.MemoryView":1173
*
* @cname('__pyx_memoryview_slice_get_size')
* cdef Py_ssize_t slice_get_size(__Pyx_memviewslice *src, int ndim) nogil: # <<<<<<<<<<<<<<
* "Return the size of the memory occupied by the slice in number of bytes"
* cdef int i
*/
static Py_ssize_t __pyx_memoryview_slice_get_size(__Pyx_memviewslice *__pyx_v_src, int __pyx_v_ndim) {
int __pyx_v_i;
Py_ssize_t __pyx_v_size;
Py_ssize_t __pyx_r;
Py_ssize_t __pyx_t_1;
int __pyx_t_2;
int __pyx_t_3;
int __pyx_t_4;
/* "View.MemoryView":1176
* "Return the size of the memory occupied by the slice in number of bytes"
* cdef int i
* cdef Py_ssize_t size = src.memview.view.itemsize # <<<<<<<<<<<<<<
*
* for i in range(ndim):
*/
__pyx_t_1 = __pyx_v_src->memview->view.itemsize;
__pyx_v_size = __pyx_t_1;
/* "View.MemoryView":1178
* cdef Py_ssize_t size = src.memview.view.itemsize
*
* for i in range(ndim): # <<<<<<<<<<<<<<
* size *= src.shape[i]
*
*/
__pyx_t_2 = __pyx_v_ndim;
__pyx_t_3 = __pyx_t_2;
for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) {
__pyx_v_i = __pyx_t_4;
/* "View.MemoryView":1179
*
* for i in range(ndim):
* size *= src.shape[i] # <<<<<<<<<<<<<<
*
* return size
*/
__pyx_v_size = (__pyx_v_size * (__pyx_v_src->shape[__pyx_v_i]));
}
/* "View.MemoryView":1181
* size *= src.shape[i]
*
* return size # <<<<<<<<<<<<<<
*
* @cname('__pyx_fill_contig_strides_array')
*/
__pyx_r = __pyx_v_size;
goto __pyx_L0;
/* "View.MemoryView":1173
*
* @cname('__pyx_memoryview_slice_get_size')
* cdef Py_ssize_t slice_get_size(__Pyx_memviewslice *src, int ndim) nogil: # <<<<<<<<<<<<<<
* "Return the size of the memory occupied by the slice in number of bytes"
* cdef int i
*/
/* function exit code */
__pyx_L0:;
return __pyx_r;
}
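/* slice_get_size multiplies the itemsize by every extent, so it reports the
 * bytes a packed copy of the slice needs rather than the span of the possibly
 * gappy source buffer. A short standalone sketch (hypothetical helper): */
#include <stddef.h>

static size_t demo_slice_bytes(size_t itemsize, const ptrdiff_t *shape, int ndim)
{
    size_t size = itemsize;
    int i;
    for (i = 0; i < ndim; i++)
        size *= (size_t)shape[i];   /* product of extents times item size */
    return size;
}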
/* "View.MemoryView":1184
*
* @cname('__pyx_fill_contig_strides_array')
* cdef Py_ssize_t fill_contig_strides_array( # <<<<<<<<<<<<<<
* Py_ssize_t *shape, Py_ssize_t *strides, Py_ssize_t stride,
* int ndim, char order) nogil:
*/
static Py_ssize_t __pyx_fill_contig_strides_array(Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, Py_ssize_t __pyx_v_stride, int __pyx_v_ndim, char __pyx_v_order) {
int __pyx_v_idx;
Py_ssize_t __pyx_r;
int __pyx_t_1;
int __pyx_t_2;
int __pyx_t_3;
int __pyx_t_4;
/* "View.MemoryView":1193
* cdef int idx
*
* if order == 'F': # <<<<<<<<<<<<<<
* for idx in range(ndim):
* strides[idx] = stride
*/
__pyx_t_1 = ((__pyx_v_order == 'F') != 0);
if (__pyx_t_1) {
/* "View.MemoryView":1194
*
* if order == 'F':
* for idx in range(ndim): # <<<<<<<<<<<<<<
* strides[idx] = stride
* stride = stride * shape[idx]
*/
__pyx_t_2 = __pyx_v_ndim;
__pyx_t_3 = __pyx_t_2;
for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) {
__pyx_v_idx = __pyx_t_4;
/* "View.MemoryView":1195
* if order == 'F':
* for idx in range(ndim):
* strides[idx] = stride # <<<<<<<<<<<<<<
* stride = stride * shape[idx]
* else:
*/
(__pyx_v_strides[__pyx_v_idx]) = __pyx_v_stride;
/* "View.MemoryView":1196
* for idx in range(ndim):
* strides[idx] = stride
* stride = stride * shape[idx] # <<<<<<<<<<<<<<
* else:
* for idx in range(ndim - 1, -1, -1):
*/
__pyx_v_stride = (__pyx_v_stride * (__pyx_v_shape[__pyx_v_idx]));
}
/* "View.MemoryView":1193
* cdef int idx
*
* if order == 'F': # <<<<<<<<<<<<<<
* for idx in range(ndim):
* strides[idx] = stride
*/
goto __pyx_L3;
}
/* "View.MemoryView":1198
* stride = stride * shape[idx]
* else:
* for idx in range(ndim - 1, -1, -1): # <<<<<<<<<<<<<<
* strides[idx] = stride
* stride = stride * shape[idx]
*/
/*else*/ {
for (__pyx_t_2 = (__pyx_v_ndim - 1); __pyx_t_2 > -1; __pyx_t_2-=1) {
__pyx_v_idx = __pyx_t_2;
/* "View.MemoryView":1199
* else:
* for idx in range(ndim - 1, -1, -1):
* strides[idx] = stride # <<<<<<<<<<<<<<
* stride = stride * shape[idx]
*
*/
(__pyx_v_strides[__pyx_v_idx]) = __pyx_v_stride;
/* "View.MemoryView":1200
* for idx in range(ndim - 1, -1, -1):
* strides[idx] = stride
* stride = stride * shape[idx] # <<<<<<<<<<<<<<
*
* return stride
*/
__pyx_v_stride = (__pyx_v_stride * (__pyx_v_shape[__pyx_v_idx]));
}
}
__pyx_L3:;
/* "View.MemoryView":1202
* stride = stride * shape[idx]
*
* return stride # <<<<<<<<<<<<<<
*
* @cname('__pyx_memoryview_copy_data_to_temp')
*/
__pyx_r = __pyx_v_stride;
goto __pyx_L0;
/* "View.MemoryView":1184
*
* @cname('__pyx_fill_contig_strides_array')
* cdef Py_ssize_t fill_contig_strides_array( # <<<<<<<<<<<<<<
* Py_ssize_t *shape, Py_ssize_t *strides, Py_ssize_t stride,
* int ndim, char order) nogil:
*/
/* function exit code */
__pyx_L0:;
return __pyx_r;
}
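/* fill_contig_strides_array: fills `strides` for a contiguous layout starting
 * from an initial `stride` (normally the itemsize) and returns the total byte
 * size. Order 'F' walks dimensions left to right (first index varies
 * fastest); any other order walks right to left, giving C order. Worked
 * example for shape {3, 4} with stride = 8:
 *   order 'C' -> strides {32, 8}   (last dimension densest)
 *   order 'F' -> strides {8, 24}   (first dimension densest)
 * Both calls return 96, the byte size of the whole block.
 */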
/* "View.MemoryView":1205
*
* @cname('__pyx_memoryview_copy_data_to_temp')
* cdef void *copy_data_to_temp(__Pyx_memviewslice *src, # <<<<<<<<<<<<<<
* __Pyx_memviewslice *tmpslice,
* char order,
*/
static void *__pyx_memoryview_copy_data_to_temp(__Pyx_memviewslice *__pyx_v_src, __Pyx_memviewslice *__pyx_v_tmpslice, char __pyx_v_order, int __pyx_v_ndim) {
int __pyx_v_i;
void *__pyx_v_result;
size_t __pyx_v_itemsize;
size_t __pyx_v_size;
void *__pyx_r;
Py_ssize_t __pyx_t_1;
int __pyx_t_2;
int __pyx_t_3;
struct __pyx_memoryview_obj *__pyx_t_4;
int __pyx_t_5;
int __pyx_t_6;
/* "View.MemoryView":1216
* cdef void *result
*
* cdef size_t itemsize = src.memview.view.itemsize # <<<<<<<<<<<<<<
* cdef size_t size = slice_get_size(src, ndim)
*
*/
__pyx_t_1 = __pyx_v_src->memview->view.itemsize;
__pyx_v_itemsize = __pyx_t_1;
/* "View.MemoryView":1217
*
* cdef size_t itemsize = src.memview.view.itemsize
* cdef size_t size = slice_get_size(src, ndim) # <<<<<<<<<<<<<<
*
* result = malloc(size)
*/
__pyx_v_size = __pyx_memoryview_slice_get_size(__pyx_v_src, __pyx_v_ndim);
/* "View.MemoryView":1219
* cdef size_t size = slice_get_size(src, ndim)
*
* result = malloc(size) # <<<<<<<<<<<<<<
* if not result:
* _err(MemoryError, NULL)
*/
__pyx_v_result = malloc(__pyx_v_size);
/* "View.MemoryView":1220
*
* result = malloc(size)
* if not result: # <<<<<<<<<<<<<<
* _err(MemoryError, NULL)
*
*/
__pyx_t_2 = ((!(__pyx_v_result != 0)) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1221
* result = malloc(size)
* if not result:
* _err(MemoryError, NULL) # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_3 = __pyx_memoryview_err(__pyx_builtin_MemoryError, NULL); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(1, 1221, __pyx_L1_error)
/* "View.MemoryView":1220
*
* result = malloc(size)
* if not result: # <<<<<<<<<<<<<<
* _err(MemoryError, NULL)
*
*/
}
/* "View.MemoryView":1224
*
*
* tmpslice.data = <char *> result # <<<<<<<<<<<<<<
* tmpslice.memview = src.memview
* for i in range(ndim):
*/
__pyx_v_tmpslice->data = ((char *)__pyx_v_result);
/* "View.MemoryView":1225
*
* tmpslice.data = <char *> result
* tmpslice.memview = src.memview # <<<<<<<<<<<<<<
* for i in range(ndim):
* tmpslice.shape[i] = src.shape[i]
*/
__pyx_t_4 = __pyx_v_src->memview;
__pyx_v_tmpslice->memview = __pyx_t_4;
/* "View.MemoryView":1226
* tmpslice.data = <char *> result
* tmpslice.memview = src.memview
* for i in range(ndim): # <<<<<<<<<<<<<<
* tmpslice.shape[i] = src.shape[i]
* tmpslice.suboffsets[i] = -1
*/
__pyx_t_3 = __pyx_v_ndim;
__pyx_t_5 = __pyx_t_3;
for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) {
__pyx_v_i = __pyx_t_6;
/* "View.MemoryView":1227
* tmpslice.memview = src.memview
* for i in range(ndim):
* tmpslice.shape[i] = src.shape[i] # <<<<<<<<<<<<<<
* tmpslice.suboffsets[i] = -1
*
*/
(__pyx_v_tmpslice->shape[__pyx_v_i]) = (__pyx_v_src->shape[__pyx_v_i]);
/* "View.MemoryView":1228
* for i in range(ndim):
* tmpslice.shape[i] = src.shape[i]
* tmpslice.suboffsets[i] = -1 # <<<<<<<<<<<<<<
*
* fill_contig_strides_array(&tmpslice.shape[0], &tmpslice.strides[0], itemsize,
*/
(__pyx_v_tmpslice->suboffsets[__pyx_v_i]) = -1L;
}
/* "View.MemoryView":1230
* tmpslice.suboffsets[i] = -1
*
* fill_contig_strides_array(&tmpslice.shape[0], &tmpslice.strides[0], itemsize, # <<<<<<<<<<<<<<
* ndim, order)
*
*/
(void)(__pyx_fill_contig_strides_array((&(__pyx_v_tmpslice->shape[0])), (&(__pyx_v_tmpslice->strides[0])), __pyx_v_itemsize, __pyx_v_ndim, __pyx_v_order));
/* "View.MemoryView":1234
*
*
* for i in range(ndim): # <<<<<<<<<<<<<<
* if tmpslice.shape[i] == 1:
* tmpslice.strides[i] = 0
*/
__pyx_t_3 = __pyx_v_ndim;
__pyx_t_5 = __pyx_t_3;
for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) {
__pyx_v_i = __pyx_t_6;
/* "View.MemoryView":1235
*
* for i in range(ndim):
* if tmpslice.shape[i] == 1: # <<<<<<<<<<<<<<
* tmpslice.strides[i] = 0
*
*/
__pyx_t_2 = (((__pyx_v_tmpslice->shape[__pyx_v_i]) == 1) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1236
* for i in range(ndim):
* if tmpslice.shape[i] == 1:
* tmpslice.strides[i] = 0 # <<<<<<<<<<<<<<
*
* if slice_is_contig(src[0], order, ndim):
*/
(__pyx_v_tmpslice->strides[__pyx_v_i]) = 0;
/* "View.MemoryView":1235
*
* for i in range(ndim):
* if tmpslice.shape[i] == 1: # <<<<<<<<<<<<<<
* tmpslice.strides[i] = 0
*
*/
}
}
/* "View.MemoryView":1238
* tmpslice.strides[i] = 0
*
* if slice_is_contig(src[0], order, ndim): # <<<<<<<<<<<<<<
* memcpy(result, src.data, size)
* else:
*/
__pyx_t_2 = (__pyx_memviewslice_is_contig((__pyx_v_src[0]), __pyx_v_order, __pyx_v_ndim) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1239
*
* if slice_is_contig(src[0], order, ndim):
* memcpy(result, src.data, size) # <<<<<<<<<<<<<<
* else:
* copy_strided_to_strided(src, tmpslice, ndim, itemsize)
*/
(void)(memcpy(__pyx_v_result, __pyx_v_src->data, __pyx_v_size));
/* "View.MemoryView":1238
* tmpslice.strides[i] = 0
*
* if slice_is_contig(src[0], order, ndim): # <<<<<<<<<<<<<<
* memcpy(result, src.data, size)
* else:
*/
goto __pyx_L9;
}
/* "View.MemoryView":1241
* memcpy(result, src.data, size)
* else:
* copy_strided_to_strided(src, tmpslice, ndim, itemsize) # <<<<<<<<<<<<<<
*
* return result
*/
/*else*/ {
copy_strided_to_strided(__pyx_v_src, __pyx_v_tmpslice, __pyx_v_ndim, __pyx_v_itemsize);
}
__pyx_L9:;
/* "View.MemoryView":1243
* copy_strided_to_strided(src, tmpslice, ndim, itemsize)
*
* return result # <<<<<<<<<<<<<<
*
*
*/
__pyx_r = __pyx_v_result;
goto __pyx_L0;
/* "View.MemoryView":1205
*
* @cname('__pyx_memoryview_copy_data_to_temp')
* cdef void *copy_data_to_temp(__Pyx_memviewslice *src, # <<<<<<<<<<<<<<
* __Pyx_memviewslice *tmpslice,
* char order,
*/
/* function exit code */
__pyx_L1_error:;
{
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
#endif
__Pyx_AddTraceback("View.MemoryView.copy_data_to_temp", __pyx_clineno, __pyx_lineno, __pyx_filename);
#ifdef WITH_THREAD
__Pyx_PyGILState_Release(__pyx_gilstate_save);
#endif
}
__pyx_r = NULL;
__pyx_L0:;
return __pyx_r;
}
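/* copy_data_to_temp: materializes `src` into a freshly malloc'ed contiguous
 * buffer described by `tmpslice`. Steps: allocate slice_get_size(src, ndim)
 * bytes (raising MemoryError through _err on failure), mirror src's shape
 * into tmpslice with every suboffset cleared to -1, compute contiguous
 * strides for the requested order, zero the stride of each length-1
 * dimension so the slice stays broadcast-compatible, then either memcpy (if
 * src is already contiguous in that order) or fall back to the generic
 * strided copy. The caller owns the returned pointer and must free() it; on
 * error the function returns NULL after recording a traceback under the GIL.
 */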
/* "View.MemoryView":1248
*
* @cname('__pyx_memoryview_err_extents')
* cdef int _err_extents(int i, Py_ssize_t extent1, # <<<<<<<<<<<<<<
* Py_ssize_t extent2) except -1 with gil:
* raise ValueError("got differing extents in dimension %d (got %d and %d)" %
*/
static int __pyx_memoryview_err_extents(int __pyx_v_i, Py_ssize_t __pyx_v_extent1, Py_ssize_t __pyx_v_extent2) {
int __pyx_r;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
#endif
__Pyx_RefNannySetupContext("_err_extents", 0);
/* "View.MemoryView":1251
* Py_ssize_t extent2) except -1 with gil:
* raise ValueError("got differing extents in dimension %d (got %d and %d)" %
* (i, extent1, extent2)) # <<<<<<<<<<<<<<
*
* @cname('__pyx_memoryview_err_dim')
*/
__pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_i); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1251, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = PyInt_FromSsize_t(__pyx_v_extent1); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1251, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = PyInt_FromSsize_t(__pyx_v_extent2); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1251, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PyTuple_New(3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 1251, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_1);
__Pyx_GIVEREF(__pyx_t_2);
PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_2);
__Pyx_GIVEREF(__pyx_t_3);
PyTuple_SET_ITEM(__pyx_t_4, 2, __pyx_t_3);
__pyx_t_1 = 0;
__pyx_t_2 = 0;
__pyx_t_3 = 0;
/* "View.MemoryView":1250
* cdef int _err_extents(int i, Py_ssize_t extent1,
* Py_ssize_t extent2) except -1 with gil:
* raise ValueError("got differing extents in dimension %d (got %d and %d)" % # <<<<<<<<<<<<<<
* (i, extent1, extent2))
*
*/
__pyx_t_3 = __Pyx_PyString_Format(__pyx_kp_s_got_differing_extents_in_dimensi, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1250, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_4 = __Pyx_PyObject_CallOneArg(__pyx_builtin_ValueError, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 1250, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_Raise(__pyx_t_4, 0, 0, 0);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__PYX_ERR(1, 1250, __pyx_L1_error)
/* "View.MemoryView":1248
*
* @cname('__pyx_memoryview_err_extents')
* cdef int _err_extents(int i, Py_ssize_t extent1, # <<<<<<<<<<<<<<
* Py_ssize_t extent2) except -1 with gil:
* raise ValueError("got differing extents in dimension %d (got %d and %d)" %
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_AddTraceback("View.MemoryView._err_extents", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__Pyx_RefNannyFinishContext();
#ifdef WITH_THREAD
__Pyx_PyGILState_Release(__pyx_gilstate_save);
#endif
return __pyx_r;
}
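/* _err_extents: GIL-acquiring error helper. Builds the tuple (i, extent1,
 * extent2), formats it into the "got differing extents in dimension %d ..."
 * message and raises ValueError. It always returns -1, which is how the
 * nogil callers detect failure (`if (res == -1) goto error`).
 */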
/* "View.MemoryView":1254
*
* @cname('__pyx_memoryview_err_dim')
* cdef int _err_dim(object error, char *msg, int dim) except -1 with gil: # <<<<<<<<<<<<<<
* raise error(msg.decode('ascii') % dim)
*
*/
static int __pyx_memoryview_err_dim(PyObject *__pyx_v_error, char *__pyx_v_msg, int __pyx_v_dim) {
int __pyx_r;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
#endif
__Pyx_RefNannySetupContext("_err_dim", 0);
__Pyx_INCREF(__pyx_v_error);
/* "View.MemoryView":1255
* @cname('__pyx_memoryview_err_dim')
* cdef int _err_dim(object error, char *msg, int dim) except -1 with gil:
* raise error(msg.decode('ascii') % dim) # <<<<<<<<<<<<<<
*
* @cname('__pyx_memoryview_err')
*/
__pyx_t_2 = __Pyx_decode_c_string(__pyx_v_msg, 0, strlen(__pyx_v_msg), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1255, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_dim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1255, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PyUnicode_Format(__pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 1255, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_INCREF(__pyx_v_error);
__pyx_t_3 = __pyx_v_error; __pyx_t_2 = NULL;
if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_3))) {
__pyx_t_2 = PyMethod_GET_SELF(__pyx_t_3);
if (likely(__pyx_t_2)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3);
__Pyx_INCREF(__pyx_t_2);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_3, function);
}
}
__pyx_t_1 = (__pyx_t_2) ? __Pyx_PyObject_Call2Args(__pyx_t_3, __pyx_t_2, __pyx_t_4) : __Pyx_PyObject_CallOneArg(__pyx_t_3, __pyx_t_4);
__Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1255, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_Raise(__pyx_t_1, 0, 0, 0);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__PYX_ERR(1, 1255, __pyx_L1_error)
/* "View.MemoryView":1254
*
* @cname('__pyx_memoryview_err_dim')
* cdef int _err_dim(object error, char *msg, int dim) except -1 with gil: # <<<<<<<<<<<<<<
* raise error(msg.decode('ascii') % dim)
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_AddTraceback("View.MemoryView._err_dim", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__Pyx_XDECREF(__pyx_v_error);
__Pyx_RefNannyFinishContext();
#ifdef WITH_THREAD
__Pyx_PyGILState_Release(__pyx_gilstate_save);
#endif
return __pyx_r;
}
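/* _err_dim: like _err_extents, but the caller supplies the exception type
 * and a printf-style ASCII message with one %d slot for the failing
 * dimension. The message is decoded to unicode, %-formatted with `dim`, and
 * raised; failure is again reported by returning -1.
 */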
/* "View.MemoryView":1258
*
* @cname('__pyx_memoryview_err')
* cdef int _err(object error, char *msg) except -1 with gil: # <<<<<<<<<<<<<<
* if msg != NULL:
* raise error(msg.decode('ascii'))
*/
static int __pyx_memoryview_err(PyObject *__pyx_v_error, char *__pyx_v_msg) {
int __pyx_r;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
#endif
__Pyx_RefNannySetupContext("_err", 0);
__Pyx_INCREF(__pyx_v_error);
/* "View.MemoryView":1259
* @cname('__pyx_memoryview_err')
* cdef int _err(object error, char *msg) except -1 with gil:
* if msg != NULL: # <<<<<<<<<<<<<<
* raise error(msg.decode('ascii'))
* else:
*/
__pyx_t_1 = ((__pyx_v_msg != NULL) != 0);
if (unlikely(__pyx_t_1)) {
/* "View.MemoryView":1260
* cdef int _err(object error, char *msg) except -1 with gil:
* if msg != NULL:
* raise error(msg.decode('ascii')) # <<<<<<<<<<<<<<
* else:
* raise error
*/
__pyx_t_3 = __Pyx_decode_c_string(__pyx_v_msg, 0, strlen(__pyx_v_msg), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1260, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_INCREF(__pyx_v_error);
__pyx_t_4 = __pyx_v_error; __pyx_t_5 = NULL;
if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_4))) {
__pyx_t_5 = PyMethod_GET_SELF(__pyx_t_4);
if (likely(__pyx_t_5)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_4);
__Pyx_INCREF(__pyx_t_5);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_4, function);
}
}
__pyx_t_2 = (__pyx_t_5) ? __Pyx_PyObject_Call2Args(__pyx_t_4, __pyx_t_5, __pyx_t_3) : __Pyx_PyObject_CallOneArg(__pyx_t_4, __pyx_t_3);
__Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1260, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_Raise(__pyx_t_2, 0, 0, 0);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__PYX_ERR(1, 1260, __pyx_L1_error)
/* "View.MemoryView":1259
* @cname('__pyx_memoryview_err')
* cdef int _err(object error, char *msg) except -1 with gil:
* if msg != NULL: # <<<<<<<<<<<<<<
* raise error(msg.decode('ascii'))
* else:
*/
}
/* "View.MemoryView":1262
* raise error(msg.decode('ascii'))
* else:
* raise error # <<<<<<<<<<<<<<
*
* @cname('__pyx_memoryview_copy_contents')
*/
/*else*/ {
__Pyx_Raise(__pyx_v_error, 0, 0, 0);
__PYX_ERR(1, 1262, __pyx_L1_error)
}
/* "View.MemoryView":1258
*
* @cname('__pyx_memoryview_err')
* cdef int _err(object error, char *msg) except -1 with gil: # <<<<<<<<<<<<<<
* if msg != NULL:
* raise error(msg.decode('ascii'))
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_AddTraceback("View.MemoryView._err", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__Pyx_XDECREF(__pyx_v_error);
__Pyx_RefNannyFinishContext();
#ifdef WITH_THREAD
__Pyx_PyGILState_Release(__pyx_gilstate_save);
#endif
return __pyx_r;
}
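/* _err: the simplest of the three with-gil error helpers. If a message is
 * given it is ASCII-decoded and passed to the exception constructor;
 * otherwise the error object itself is raised bare. copy_data_to_temp above
 * uses it as _err(MemoryError, NULL). Returns -1 on all paths.
 */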
/* "View.MemoryView":1265
*
* @cname('__pyx_memoryview_copy_contents')
* cdef int memoryview_copy_contents(__Pyx_memviewslice src, # <<<<<<<<<<<<<<
* __Pyx_memviewslice dst,
* int src_ndim, int dst_ndim,
*/
static int __pyx_memoryview_copy_contents(__Pyx_memviewslice __pyx_v_src, __Pyx_memviewslice __pyx_v_dst, int __pyx_v_src_ndim, int __pyx_v_dst_ndim, int __pyx_v_dtype_is_object) {
void *__pyx_v_tmpdata;
size_t __pyx_v_itemsize;
int __pyx_v_i;
char __pyx_v_order;
int __pyx_v_broadcasting;
int __pyx_v_direct_copy;
__Pyx_memviewslice __pyx_v_tmp;
int __pyx_v_ndim;
int __pyx_r;
Py_ssize_t __pyx_t_1;
int __pyx_t_2;
int __pyx_t_3;
int __pyx_t_4;
int __pyx_t_5;
int __pyx_t_6;
void *__pyx_t_7;
int __pyx_t_8;
/* "View.MemoryView":1273
* Check for overlapping memory and verify the shapes.
* """
* cdef void *tmpdata = NULL # <<<<<<<<<<<<<<
* cdef size_t itemsize = src.memview.view.itemsize
* cdef int i
*/
__pyx_v_tmpdata = NULL;
/* "View.MemoryView":1274
* """
* cdef void *tmpdata = NULL
* cdef size_t itemsize = src.memview.view.itemsize # <<<<<<<<<<<<<<
* cdef int i
* cdef char order = get_best_order(&src, src_ndim)
*/
__pyx_t_1 = __pyx_v_src.memview->view.itemsize;
__pyx_v_itemsize = __pyx_t_1;
/* "View.MemoryView":1276
* cdef size_t itemsize = src.memview.view.itemsize
* cdef int i
* cdef char order = get_best_order(&src, src_ndim) # <<<<<<<<<<<<<<
* cdef bint broadcasting = False
* cdef bint direct_copy = False
*/
__pyx_v_order = __pyx_get_best_slice_order((&__pyx_v_src), __pyx_v_src_ndim);
/* "View.MemoryView":1277
* cdef int i
* cdef char order = get_best_order(&src, src_ndim)
* cdef bint broadcasting = False # <<<<<<<<<<<<<<
* cdef bint direct_copy = False
* cdef __Pyx_memviewslice tmp
*/
__pyx_v_broadcasting = 0;
/* "View.MemoryView":1278
* cdef char order = get_best_order(&src, src_ndim)
* cdef bint broadcasting = False
* cdef bint direct_copy = False # <<<<<<<<<<<<<<
* cdef __Pyx_memviewslice tmp
*
*/
__pyx_v_direct_copy = 0;
/* "View.MemoryView":1281
* cdef __Pyx_memviewslice tmp
*
* if src_ndim < dst_ndim: # <<<<<<<<<<<<<<
* broadcast_leading(&src, src_ndim, dst_ndim)
* elif dst_ndim < src_ndim:
*/
__pyx_t_2 = ((__pyx_v_src_ndim < __pyx_v_dst_ndim) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1282
*
* if src_ndim < dst_ndim:
* broadcast_leading(&src, src_ndim, dst_ndim) # <<<<<<<<<<<<<<
* elif dst_ndim < src_ndim:
* broadcast_leading(&dst, dst_ndim, src_ndim)
*/
__pyx_memoryview_broadcast_leading((&__pyx_v_src), __pyx_v_src_ndim, __pyx_v_dst_ndim);
/* "View.MemoryView":1281
* cdef __Pyx_memviewslice tmp
*
* if src_ndim < dst_ndim: # <<<<<<<<<<<<<<
* broadcast_leading(&src, src_ndim, dst_ndim)
* elif dst_ndim < src_ndim:
*/
goto __pyx_L3;
}
/* "View.MemoryView":1283
* if src_ndim < dst_ndim:
* broadcast_leading(&src, src_ndim, dst_ndim)
* elif dst_ndim < src_ndim: # <<<<<<<<<<<<<<
* broadcast_leading(&dst, dst_ndim, src_ndim)
*
*/
__pyx_t_2 = ((__pyx_v_dst_ndim < __pyx_v_src_ndim) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1284
* broadcast_leading(&src, src_ndim, dst_ndim)
* elif dst_ndim < src_ndim:
* broadcast_leading(&dst, dst_ndim, src_ndim) # <<<<<<<<<<<<<<
*
* cdef int ndim = max(src_ndim, dst_ndim)
*/
__pyx_memoryview_broadcast_leading((&__pyx_v_dst), __pyx_v_dst_ndim, __pyx_v_src_ndim);
/* "View.MemoryView":1283
* if src_ndim < dst_ndim:
* broadcast_leading(&src, src_ndim, dst_ndim)
* elif dst_ndim < src_ndim: # <<<<<<<<<<<<<<
* broadcast_leading(&dst, dst_ndim, src_ndim)
*
*/
}
__pyx_L3:;
/* "View.MemoryView":1286
* broadcast_leading(&dst, dst_ndim, src_ndim)
*
* cdef int ndim = max(src_ndim, dst_ndim) # <<<<<<<<<<<<<<
*
* for i in range(ndim):
*/
__pyx_t_3 = __pyx_v_dst_ndim;
__pyx_t_4 = __pyx_v_src_ndim;
if (((__pyx_t_3 > __pyx_t_4) != 0)) {
__pyx_t_5 = __pyx_t_3;
} else {
__pyx_t_5 = __pyx_t_4;
}
__pyx_v_ndim = __pyx_t_5;
/* "View.MemoryView":1288
* cdef int ndim = max(src_ndim, dst_ndim)
*
* for i in range(ndim): # <<<<<<<<<<<<<<
* if src.shape[i] != dst.shape[i]:
* if src.shape[i] == 1:
*/
__pyx_t_5 = __pyx_v_ndim;
__pyx_t_3 = __pyx_t_5;
for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) {
__pyx_v_i = __pyx_t_4;
/* "View.MemoryView":1289
*
* for i in range(ndim):
* if src.shape[i] != dst.shape[i]: # <<<<<<<<<<<<<<
* if src.shape[i] == 1:
* broadcasting = True
*/
__pyx_t_2 = (((__pyx_v_src.shape[__pyx_v_i]) != (__pyx_v_dst.shape[__pyx_v_i])) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1290
* for i in range(ndim):
* if src.shape[i] != dst.shape[i]:
* if src.shape[i] == 1: # <<<<<<<<<<<<<<
* broadcasting = True
* src.strides[i] = 0
*/
__pyx_t_2 = (((__pyx_v_src.shape[__pyx_v_i]) == 1) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1291
* if src.shape[i] != dst.shape[i]:
* if src.shape[i] == 1:
* broadcasting = True # <<<<<<<<<<<<<<
* src.strides[i] = 0
* else:
*/
__pyx_v_broadcasting = 1;
/* "View.MemoryView":1292
* if src.shape[i] == 1:
* broadcasting = True
* src.strides[i] = 0 # <<<<<<<<<<<<<<
* else:
* _err_extents(i, dst.shape[i], src.shape[i])
*/
(__pyx_v_src.strides[__pyx_v_i]) = 0;
/* "View.MemoryView":1290
* for i in range(ndim):
* if src.shape[i] != dst.shape[i]:
* if src.shape[i] == 1: # <<<<<<<<<<<<<<
* broadcasting = True
* src.strides[i] = 0
*/
goto __pyx_L7;
}
/* "View.MemoryView":1294
* src.strides[i] = 0
* else:
* _err_extents(i, dst.shape[i], src.shape[i]) # <<<<<<<<<<<<<<
*
* if src.suboffsets[i] >= 0:
*/
/*else*/ {
__pyx_t_6 = __pyx_memoryview_err_extents(__pyx_v_i, (__pyx_v_dst.shape[__pyx_v_i]), (__pyx_v_src.shape[__pyx_v_i])); if (unlikely(__pyx_t_6 == ((int)-1))) __PYX_ERR(1, 1294, __pyx_L1_error)
}
__pyx_L7:;
/* "View.MemoryView":1289
*
* for i in range(ndim):
* if src.shape[i] != dst.shape[i]: # <<<<<<<<<<<<<<
* if src.shape[i] == 1:
* broadcasting = True
*/
}
/* "View.MemoryView":1296
* _err_extents(i, dst.shape[i], src.shape[i])
*
* if src.suboffsets[i] >= 0: # <<<<<<<<<<<<<<
* _err_dim(ValueError, "Dimension %d is not direct", i)
*
*/
__pyx_t_2 = (((__pyx_v_src.suboffsets[__pyx_v_i]) >= 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1297
*
* if src.suboffsets[i] >= 0:
* _err_dim(ValueError, "Dimension %d is not direct", i) # <<<<<<<<<<<<<<
*
* if slices_overlap(&src, &dst, ndim, itemsize):
*/
__pyx_t_6 = __pyx_memoryview_err_dim(__pyx_builtin_ValueError, ((char *)"Dimension %d is not direct"), __pyx_v_i); if (unlikely(__pyx_t_6 == ((int)-1))) __PYX_ERR(1, 1297, __pyx_L1_error)
/* "View.MemoryView":1296
* _err_extents(i, dst.shape[i], src.shape[i])
*
* if src.suboffsets[i] >= 0: # <<<<<<<<<<<<<<
* _err_dim(ValueError, "Dimension %d is not direct", i)
*
*/
}
}
/* "View.MemoryView":1299
* _err_dim(ValueError, "Dimension %d is not direct", i)
*
* if slices_overlap(&src, &dst, ndim, itemsize): # <<<<<<<<<<<<<<
*
* if not slice_is_contig(src, order, ndim):
*/
__pyx_t_2 = (__pyx_slices_overlap((&__pyx_v_src), (&__pyx_v_dst), __pyx_v_ndim, __pyx_v_itemsize) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1301
* if slices_overlap(&src, &dst, ndim, itemsize):
*
* if not slice_is_contig(src, order, ndim): # <<<<<<<<<<<<<<
* order = get_best_order(&dst, ndim)
*
*/
__pyx_t_2 = ((!(__pyx_memviewslice_is_contig(__pyx_v_src, __pyx_v_order, __pyx_v_ndim) != 0)) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1302
*
* if not slice_is_contig(src, order, ndim):
* order = get_best_order(&dst, ndim) # <<<<<<<<<<<<<<
*
* tmpdata = copy_data_to_temp(&src, &tmp, order, ndim)
*/
__pyx_v_order = __pyx_get_best_slice_order((&__pyx_v_dst), __pyx_v_ndim);
/* "View.MemoryView":1301
* if slices_overlap(&src, &dst, ndim, itemsize):
*
* if not slice_is_contig(src, order, ndim): # <<<<<<<<<<<<<<
* order = get_best_order(&dst, ndim)
*
*/
}
/* "View.MemoryView":1304
* order = get_best_order(&dst, ndim)
*
* tmpdata = copy_data_to_temp(&src, &tmp, order, ndim) # <<<<<<<<<<<<<<
* src = tmp
*
*/
__pyx_t_7 = __pyx_memoryview_copy_data_to_temp((&__pyx_v_src), (&__pyx_v_tmp), __pyx_v_order, __pyx_v_ndim); if (unlikely(__pyx_t_7 == ((void *)NULL))) __PYX_ERR(1, 1304, __pyx_L1_error)
__pyx_v_tmpdata = __pyx_t_7;
/* "View.MemoryView":1305
*
* tmpdata = copy_data_to_temp(&src, &tmp, order, ndim)
* src = tmp # <<<<<<<<<<<<<<
*
* if not broadcasting:
*/
__pyx_v_src = __pyx_v_tmp;
/* "View.MemoryView":1299
* _err_dim(ValueError, "Dimension %d is not direct", i)
*
* if slices_overlap(&src, &dst, ndim, itemsize): # <<<<<<<<<<<<<<
*
* if not slice_is_contig(src, order, ndim):
*/
}
/* "View.MemoryView":1307
* src = tmp
*
* if not broadcasting: # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_2 = ((!(__pyx_v_broadcasting != 0)) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1310
*
*
* if slice_is_contig(src, 'C', ndim): # <<<<<<<<<<<<<<
* direct_copy = slice_is_contig(dst, 'C', ndim)
* elif slice_is_contig(src, 'F', ndim):
*/
__pyx_t_2 = (__pyx_memviewslice_is_contig(__pyx_v_src, 'C', __pyx_v_ndim) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1311
*
* if slice_is_contig(src, 'C', ndim):
* direct_copy = slice_is_contig(dst, 'C', ndim) # <<<<<<<<<<<<<<
* elif slice_is_contig(src, 'F', ndim):
* direct_copy = slice_is_contig(dst, 'F', ndim)
*/
__pyx_v_direct_copy = __pyx_memviewslice_is_contig(__pyx_v_dst, 'C', __pyx_v_ndim);
/* "View.MemoryView":1310
*
*
* if slice_is_contig(src, 'C', ndim): # <<<<<<<<<<<<<<
* direct_copy = slice_is_contig(dst, 'C', ndim)
* elif slice_is_contig(src, 'F', ndim):
*/
goto __pyx_L12;
}
/* "View.MemoryView":1312
* if slice_is_contig(src, 'C', ndim):
* direct_copy = slice_is_contig(dst, 'C', ndim)
* elif slice_is_contig(src, 'F', ndim): # <<<<<<<<<<<<<<
* direct_copy = slice_is_contig(dst, 'F', ndim)
*
*/
__pyx_t_2 = (__pyx_memviewslice_is_contig(__pyx_v_src, 'F', __pyx_v_ndim) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1313
* direct_copy = slice_is_contig(dst, 'C', ndim)
* elif slice_is_contig(src, 'F', ndim):
* direct_copy = slice_is_contig(dst, 'F', ndim) # <<<<<<<<<<<<<<
*
* if direct_copy:
*/
__pyx_v_direct_copy = __pyx_memviewslice_is_contig(__pyx_v_dst, 'F', __pyx_v_ndim);
/* "View.MemoryView":1312
* if slice_is_contig(src, 'C', ndim):
* direct_copy = slice_is_contig(dst, 'C', ndim)
* elif slice_is_contig(src, 'F', ndim): # <<<<<<<<<<<<<<
* direct_copy = slice_is_contig(dst, 'F', ndim)
*
*/
}
__pyx_L12:;
/* "View.MemoryView":1315
* direct_copy = slice_is_contig(dst, 'F', ndim)
*
* if direct_copy: # <<<<<<<<<<<<<<
*
* refcount_copying(&dst, dtype_is_object, ndim, False)
*/
__pyx_t_2 = (__pyx_v_direct_copy != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1317
* if direct_copy:
*
* refcount_copying(&dst, dtype_is_object, ndim, False) # <<<<<<<<<<<<<<
* memcpy(dst.data, src.data, slice_get_size(&src, ndim))
* refcount_copying(&dst, dtype_is_object, ndim, True)
*/
__pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 0);
/* "View.MemoryView":1318
*
* refcount_copying(&dst, dtype_is_object, ndim, False)
* memcpy(dst.data, src.data, slice_get_size(&src, ndim)) # <<<<<<<<<<<<<<
* refcount_copying(&dst, dtype_is_object, ndim, True)
* free(tmpdata)
*/
(void)(memcpy(__pyx_v_dst.data, __pyx_v_src.data, __pyx_memoryview_slice_get_size((&__pyx_v_src), __pyx_v_ndim)));
/* "View.MemoryView":1319
* refcount_copying(&dst, dtype_is_object, ndim, False)
* memcpy(dst.data, src.data, slice_get_size(&src, ndim))
* refcount_copying(&dst, dtype_is_object, ndim, True) # <<<<<<<<<<<<<<
* free(tmpdata)
* return 0
*/
__pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 1);
/* "View.MemoryView":1320
* memcpy(dst.data, src.data, slice_get_size(&src, ndim))
* refcount_copying(&dst, dtype_is_object, ndim, True)
* free(tmpdata) # <<<<<<<<<<<<<<
* return 0
*
*/
free(__pyx_v_tmpdata);
/* "View.MemoryView":1321
* refcount_copying(&dst, dtype_is_object, ndim, True)
* free(tmpdata)
* return 0 # <<<<<<<<<<<<<<
*
* if order == 'F' == get_best_order(&dst, ndim):
*/
__pyx_r = 0;
goto __pyx_L0;
/* "View.MemoryView":1315
* direct_copy = slice_is_contig(dst, 'F', ndim)
*
* if direct_copy: # <<<<<<<<<<<<<<
*
* refcount_copying(&dst, dtype_is_object, ndim, False)
*/
}
/* "View.MemoryView":1307
* src = tmp
*
* if not broadcasting: # <<<<<<<<<<<<<<
*
*
*/
}
/* "View.MemoryView":1323
* return 0
*
* if order == 'F' == get_best_order(&dst, ndim): # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_2 = (__pyx_v_order == 'F');
if (__pyx_t_2) {
__pyx_t_2 = ('F' == __pyx_get_best_slice_order((&__pyx_v_dst), __pyx_v_ndim));
}
__pyx_t_8 = (__pyx_t_2 != 0);
if (__pyx_t_8) {
/* "View.MemoryView":1326
*
*
* transpose_memslice(&src) # <<<<<<<<<<<<<<
* transpose_memslice(&dst)
*
*/
__pyx_t_5 = __pyx_memslice_transpose((&__pyx_v_src)); if (unlikely(__pyx_t_5 == ((int)0))) __PYX_ERR(1, 1326, __pyx_L1_error)
/* "View.MemoryView":1327
*
* transpose_memslice(&src)
* transpose_memslice(&dst) # <<<<<<<<<<<<<<
*
* refcount_copying(&dst, dtype_is_object, ndim, False)
*/
__pyx_t_5 = __pyx_memslice_transpose((&__pyx_v_dst)); if (unlikely(__pyx_t_5 == ((int)0))) __PYX_ERR(1, 1327, __pyx_L1_error)
/* "View.MemoryView":1323
* return 0
*
* if order == 'F' == get_best_order(&dst, ndim): # <<<<<<<<<<<<<<
*
*
*/
}
/* "View.MemoryView":1329
* transpose_memslice(&dst)
*
* refcount_copying(&dst, dtype_is_object, ndim, False) # <<<<<<<<<<<<<<
* copy_strided_to_strided(&src, &dst, ndim, itemsize)
* refcount_copying(&dst, dtype_is_object, ndim, True)
*/
__pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 0);
/* "View.MemoryView":1330
*
* refcount_copying(&dst, dtype_is_object, ndim, False)
* copy_strided_to_strided(&src, &dst, ndim, itemsize) # <<<<<<<<<<<<<<
* refcount_copying(&dst, dtype_is_object, ndim, True)
*
*/
copy_strided_to_strided((&__pyx_v_src), (&__pyx_v_dst), __pyx_v_ndim, __pyx_v_itemsize);
/* "View.MemoryView":1331
* refcount_copying(&dst, dtype_is_object, ndim, False)
* copy_strided_to_strided(&src, &dst, ndim, itemsize)
* refcount_copying(&dst, dtype_is_object, ndim, True) # <<<<<<<<<<<<<<
*
* free(tmpdata)
*/
__pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 1);
/* "View.MemoryView":1333
* refcount_copying(&dst, dtype_is_object, ndim, True)
*
* free(tmpdata) # <<<<<<<<<<<<<<
* return 0
*
*/
free(__pyx_v_tmpdata);
/* "View.MemoryView":1334
*
* free(tmpdata)
* return 0 # <<<<<<<<<<<<<<
*
* @cname('__pyx_memoryview_broadcast_leading')
*/
__pyx_r = 0;
goto __pyx_L0;
/* "View.MemoryView":1265
*
* @cname('__pyx_memoryview_copy_contents')
* cdef int memoryview_copy_contents(__Pyx_memviewslice src, # <<<<<<<<<<<<<<
* __Pyx_memviewslice dst,
* int src_ndim, int dst_ndim,
*/
/* function exit code */
__pyx_L1_error:;
{
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
#endif
__Pyx_AddTraceback("View.MemoryView.memoryview_copy_contents", __pyx_clineno, __pyx_lineno, __pyx_filename);
#ifdef WITH_THREAD
__Pyx_PyGILState_Release(__pyx_gilstate_save);
#endif
}
__pyx_r = -1;
__pyx_L0:;
return __pyx_r;
}
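/* memoryview_copy_contents: copies `src` into `dst`; this is the workhorse
 * behind memoryview slice assignment. Outline of the generated logic above:
 *   1. broadcast_leading pads the lower-dimensional slice so both sides have
 *      ndim = max(src_ndim, dst_ndim) dimensions;
 *   2. extents are verified per dimension: a src extent of 1 is broadcast
 *      (its stride is zeroed), any other mismatch raises via _err_extents,
 *      and indirect dimensions (suboffset >= 0) are rejected via _err_dim;
 *   3. if the slices overlap in memory, src is first copied into a
 *      contiguous temporary (copy_data_to_temp) so the copy cannot read
 *      bytes it has already overwritten;
 *   4. fast path: when not broadcasting and both slices are C-contiguous, or
 *      both F-contiguous, a single memcpy suffices;
 *   5. otherwise, when 'F' is the best order for both, src and dst are
 *      transposed in place so the generic copy_strided_to_strided walks
 *      memory with the densest dimension innermost.
 * refcount_copying brackets each copy so that, for object dtypes, the old
 * destination references are dropped before and the new ones acquired after
 * the raw byte copy. Any temporary buffer is free()d before returning 0; on
 * error the function returns -1.
 */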
/* "View.MemoryView":1337
*
* @cname('__pyx_memoryview_broadcast_leading')
* cdef void broadcast_leading(__Pyx_memviewslice *mslice, # <<<<<<<<<<<<<<
* int ndim,
* int ndim_other) nogil:
*/
static void __pyx_memoryview_broadcast_leading(__Pyx_memviewslice *__pyx_v_mslice, int __pyx_v_ndim, int __pyx_v_ndim_other) {
int __pyx_v_i;
int __pyx_v_offset;
int __pyx_t_1;
int __pyx_t_2;
int __pyx_t_3;
/* "View.MemoryView":1341
* int ndim_other) nogil:
* cdef int i
* cdef int offset = ndim_other - ndim # <<<<<<<<<<<<<<
*
* for i in range(ndim - 1, -1, -1):
*/
__pyx_v_offset = (__pyx_v_ndim_other - __pyx_v_ndim);
/* "View.MemoryView":1343
* cdef int offset = ndim_other - ndim
*
* for i in range(ndim - 1, -1, -1): # <<<<<<<<<<<<<<
* mslice.shape[i + offset] = mslice.shape[i]
* mslice.strides[i + offset] = mslice.strides[i]
*/
for (__pyx_t_1 = (__pyx_v_ndim - 1); __pyx_t_1 > -1; __pyx_t_1-=1) {
__pyx_v_i = __pyx_t_1;
/* "View.MemoryView":1344
*
* for i in range(ndim - 1, -1, -1):
* mslice.shape[i + offset] = mslice.shape[i] # <<<<<<<<<<<<<<
* mslice.strides[i + offset] = mslice.strides[i]
* mslice.suboffsets[i + offset] = mslice.suboffsets[i]
*/
(__pyx_v_mslice->shape[(__pyx_v_i + __pyx_v_offset)]) = (__pyx_v_mslice->shape[__pyx_v_i]);
/* "View.MemoryView":1345
* for i in range(ndim - 1, -1, -1):
* mslice.shape[i + offset] = mslice.shape[i]
* mslice.strides[i + offset] = mslice.strides[i] # <<<<<<<<<<<<<<
* mslice.suboffsets[i + offset] = mslice.suboffsets[i]
*
*/
(__pyx_v_mslice->strides[(__pyx_v_i + __pyx_v_offset)]) = (__pyx_v_mslice->strides[__pyx_v_i]);
/* "View.MemoryView":1346
* mslice.shape[i + offset] = mslice.shape[i]
* mslice.strides[i + offset] = mslice.strides[i]
* mslice.suboffsets[i + offset] = mslice.suboffsets[i] # <<<<<<<<<<<<<<
*
* for i in range(offset):
*/
(__pyx_v_mslice->suboffsets[(__pyx_v_i + __pyx_v_offset)]) = (__pyx_v_mslice->suboffsets[__pyx_v_i]);
}
/* "View.MemoryView":1348
* mslice.suboffsets[i + offset] = mslice.suboffsets[i]
*
* for i in range(offset): # <<<<<<<<<<<<<<
* mslice.shape[i] = 1
* mslice.strides[i] = mslice.strides[0]
*/
__pyx_t_1 = __pyx_v_offset;
__pyx_t_2 = __pyx_t_1;
for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) {
__pyx_v_i = __pyx_t_3;
/* "View.MemoryView":1349
*
* for i in range(offset):
* mslice.shape[i] = 1 # <<<<<<<<<<<<<<
* mslice.strides[i] = mslice.strides[0]
* mslice.suboffsets[i] = -1
*/
(__pyx_v_mslice->shape[__pyx_v_i]) = 1;
/* "View.MemoryView":1350
* for i in range(offset):
* mslice.shape[i] = 1
* mslice.strides[i] = mslice.strides[0] # <<<<<<<<<<<<<<
* mslice.suboffsets[i] = -1
*
*/
(__pyx_v_mslice->strides[__pyx_v_i]) = (__pyx_v_mslice->strides[0]);
/* "View.MemoryView":1351
* mslice.shape[i] = 1
* mslice.strides[i] = mslice.strides[0]
* mslice.suboffsets[i] = -1 # <<<<<<<<<<<<<<
*
*
*/
(__pyx_v_mslice->suboffsets[__pyx_v_i]) = -1L;
}
/* "View.MemoryView":1337
*
* @cname('__pyx_memoryview_broadcast_leading')
* cdef void broadcast_leading(__Pyx_memviewslice *mslice, # <<<<<<<<<<<<<<
* int ndim,
* int ndim_other) nogil:
*/
/* function exit code */
}
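/* broadcast_leading: grows an ndim-dimensional slice to ndim_other
 * dimensions by shifting shape/strides/suboffsets right by
 * offset = ndim_other - ndim, then filling the new leading entries with
 * extent 1, a stride copied from the original strides[0], and suboffset -1.
 * The shift loop runs from the highest index down so no entry is overwritten
 * before it has been moved. Example: a {5, 7} slice broadcast against 4-D
 * data becomes {1, 1, 5, 7}.
 */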
/* "View.MemoryView":1359
*
* @cname('__pyx_memoryview_refcount_copying')
* cdef void refcount_copying(__Pyx_memviewslice *dst, bint dtype_is_object, # <<<<<<<<<<<<<<
* int ndim, bint inc) nogil:
*
*/
static void __pyx_memoryview_refcount_copying(__Pyx_memviewslice *__pyx_v_dst, int __pyx_v_dtype_is_object, int __pyx_v_ndim, int __pyx_v_inc) {
int __pyx_t_1;
/* "View.MemoryView":1363
*
*
* if dtype_is_object: # <<<<<<<<<<<<<<
* refcount_objects_in_slice_with_gil(dst.data, dst.shape,
* dst.strides, ndim, inc)
*/
__pyx_t_1 = (__pyx_v_dtype_is_object != 0);
if (__pyx_t_1) {
/* "View.MemoryView":1364
*
* if dtype_is_object:
* refcount_objects_in_slice_with_gil(dst.data, dst.shape, # <<<<<<<<<<<<<<
* dst.strides, ndim, inc)
*
*/
__pyx_memoryview_refcount_objects_in_slice_with_gil(__pyx_v_dst->data, __pyx_v_dst->shape, __pyx_v_dst->strides, __pyx_v_ndim, __pyx_v_inc);
/* "View.MemoryView":1363
*
*
* if dtype_is_object: # <<<<<<<<<<<<<<
* refcount_objects_in_slice_with_gil(dst.data, dst.shape,
* dst.strides, ndim, inc)
*/
}
/* "View.MemoryView":1359
*
* @cname('__pyx_memoryview_refcount_copying')
* cdef void refcount_copying(__Pyx_memviewslice *dst, bint dtype_is_object, # <<<<<<<<<<<<<<
* int ndim, bint inc) nogil:
*
*/
/* function exit code */
}
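/* refcount_copying: nogil shim around the reference-count walk below. It is
 * a no-op unless the element type is a Python object; callers invoke it with
 * inc=False before overwriting a destination slice (release the old
 * references) and inc=True afterwards (own the newly written ones).
 */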
/* "View.MemoryView":1368
*
* @cname('__pyx_memoryview_refcount_objects_in_slice_with_gil')
* cdef void refcount_objects_in_slice_with_gil(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<<
* Py_ssize_t *strides, int ndim,
* bint inc) with gil:
*/
static void __pyx_memoryview_refcount_objects_in_slice_with_gil(char *__pyx_v_data, Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, int __pyx_v_ndim, int __pyx_v_inc) {
__Pyx_RefNannyDeclarations
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
#endif
__Pyx_RefNannySetupContext("refcount_objects_in_slice_with_gil", 0);
/* "View.MemoryView":1371
* Py_ssize_t *strides, int ndim,
* bint inc) with gil:
* refcount_objects_in_slice(data, shape, strides, ndim, inc) # <<<<<<<<<<<<<<
*
* @cname('__pyx_memoryview_refcount_objects_in_slice')
*/
__pyx_memoryview_refcount_objects_in_slice(__pyx_v_data, __pyx_v_shape, __pyx_v_strides, __pyx_v_ndim, __pyx_v_inc);
/* "View.MemoryView":1368
*
* @cname('__pyx_memoryview_refcount_objects_in_slice_with_gil')
* cdef void refcount_objects_in_slice_with_gil(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<<
* Py_ssize_t *strides, int ndim,
* bint inc) with gil:
*/
/* function exit code */
__Pyx_RefNannyFinishContext();
#ifdef WITH_THREAD
__Pyx_PyGILState_Release(__pyx_gilstate_save);
#endif
}
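/* refcount_objects_in_slice_with_gil: acquires the GIL (required before
 * touching any PyObject refcount) and forwards to the recursive walker
 * below.
 */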
/* "View.MemoryView":1374
*
* @cname('__pyx_memoryview_refcount_objects_in_slice')
* cdef void refcount_objects_in_slice(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<<
* Py_ssize_t *strides, int ndim, bint inc):
* cdef Py_ssize_t i
*/
static void __pyx_memoryview_refcount_objects_in_slice(char *__pyx_v_data, Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, int __pyx_v_ndim, int __pyx_v_inc) {
CYTHON_UNUSED Py_ssize_t __pyx_v_i;
__Pyx_RefNannyDeclarations
Py_ssize_t __pyx_t_1;
Py_ssize_t __pyx_t_2;
Py_ssize_t __pyx_t_3;
int __pyx_t_4;
__Pyx_RefNannySetupContext("refcount_objects_in_slice", 0);
/* "View.MemoryView":1378
* cdef Py_ssize_t i
*
* for i in range(shape[0]): # <<<<<<<<<<<<<<
* if ndim == 1:
* if inc:
*/
__pyx_t_1 = (__pyx_v_shape[0]);
__pyx_t_2 = __pyx_t_1;
for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) {
__pyx_v_i = __pyx_t_3;
/* "View.MemoryView":1379
*
* for i in range(shape[0]):
* if ndim == 1: # <<<<<<<<<<<<<<
* if inc:
* Py_INCREF((<PyObject **> data)[0])
*/
__pyx_t_4 = ((__pyx_v_ndim == 1) != 0);
if (__pyx_t_4) {
/* "View.MemoryView":1380
* for i in range(shape[0]):
* if ndim == 1:
* if inc: # <<<<<<<<<<<<<<
* Py_INCREF((<PyObject **> data)[0])
* else:
*/
__pyx_t_4 = (__pyx_v_inc != 0);
if (__pyx_t_4) {
/* "View.MemoryView":1381
* if ndim == 1:
* if inc:
* Py_INCREF((<PyObject **> data)[0]) # <<<<<<<<<<<<<<
* else:
* Py_DECREF((<PyObject **> data)[0])
*/
Py_INCREF((((PyObject **)__pyx_v_data)[0]));
/* "View.MemoryView":1380
* for i in range(shape[0]):
* if ndim == 1:
* if inc: # <<<<<<<<<<<<<<
* Py_INCREF((<PyObject **> data)[0])
* else:
*/
goto __pyx_L6;
}
/* "View.MemoryView":1383
* Py_INCREF((<PyObject **> data)[0])
* else:
* Py_DECREF((<PyObject **> data)[0]) # <<<<<<<<<<<<<<
* else:
* refcount_objects_in_slice(data, shape + 1, strides + 1,
*/
/*else*/ {
Py_DECREF((((PyObject **)__pyx_v_data)[0]));
}
__pyx_L6:;
/* "View.MemoryView":1379
*
* for i in range(shape[0]):
* if ndim == 1: # <<<<<<<<<<<<<<
* if inc:
* Py_INCREF((<PyObject **> data)[0])
*/
goto __pyx_L5;
}
/* "View.MemoryView":1385
* Py_DECREF((<PyObject **> data)[0])
* else:
* refcount_objects_in_slice(data, shape + 1, strides + 1, # <<<<<<<<<<<<<<
* ndim - 1, inc)
*
*/
/*else*/ {
/* "View.MemoryView":1386
* else:
* refcount_objects_in_slice(data, shape + 1, strides + 1,
* ndim - 1, inc) # <<<<<<<<<<<<<<
*
* data += strides[0]
*/
__pyx_memoryview_refcount_objects_in_slice(__pyx_v_data, (__pyx_v_shape + 1), (__pyx_v_strides + 1), (__pyx_v_ndim - 1), __pyx_v_inc);
}
__pyx_L5:;
/* "View.MemoryView":1388
* ndim - 1, inc)
*
* data += strides[0] # <<<<<<<<<<<<<<
*
*
*/
__pyx_v_data = (__pyx_v_data + (__pyx_v_strides[0]));
}
/* "View.MemoryView":1374
*
* @cname('__pyx_memoryview_refcount_objects_in_slice')
* cdef void refcount_objects_in_slice(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<<
* Py_ssize_t *strides, int ndim, bint inc):
* cdef Py_ssize_t i
*/
/* function exit code */
__Pyx_RefNannyFinishContext();
}
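/* refcount_objects_in_slice: recursive walk over an object-typed slice. For
 * each position along the first dimension it either Py_INCREF/Py_DECREFs the
 * PyObject* stored at `data` (when ndim == 1) or recurses with
 * shape + 1 / strides + 1 over the remaining dimensions, then advances
 * `data` by strides[0].
 */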
/* "View.MemoryView":1394
*
* @cname('__pyx_memoryview_slice_assign_scalar')
* cdef void slice_assign_scalar(__Pyx_memviewslice *dst, int ndim, # <<<<<<<<<<<<<<
* size_t itemsize, void *item,
* bint dtype_is_object) nogil:
*/
static void __pyx_memoryview_slice_assign_scalar(__Pyx_memviewslice *__pyx_v_dst, int __pyx_v_ndim, size_t __pyx_v_itemsize, void *__pyx_v_item, int __pyx_v_dtype_is_object) {
/* "View.MemoryView":1397
* size_t itemsize, void *item,
* bint dtype_is_object) nogil:
* refcount_copying(dst, dtype_is_object, ndim, False) # <<<<<<<<<<<<<<
* _slice_assign_scalar(dst.data, dst.shape, dst.strides, ndim,
* itemsize, item)
*/
__pyx_memoryview_refcount_copying(__pyx_v_dst, __pyx_v_dtype_is_object, __pyx_v_ndim, 0);
/* "View.MemoryView":1398
* bint dtype_is_object) nogil:
* refcount_copying(dst, dtype_is_object, ndim, False)
* _slice_assign_scalar(dst.data, dst.shape, dst.strides, ndim, # <<<<<<<<<<<<<<
* itemsize, item)
* refcount_copying(dst, dtype_is_object, ndim, True)
*/
__pyx_memoryview__slice_assign_scalar(__pyx_v_dst->data, __pyx_v_dst->shape, __pyx_v_dst->strides, __pyx_v_ndim, __pyx_v_itemsize, __pyx_v_item);
/* "View.MemoryView":1400
* _slice_assign_scalar(dst.data, dst.shape, dst.strides, ndim,
* itemsize, item)
* refcount_copying(dst, dtype_is_object, ndim, True) # <<<<<<<<<<<<<<
*
*
*/
__pyx_memoryview_refcount_copying(__pyx_v_dst, __pyx_v_dtype_is_object, __pyx_v_ndim, 1);
/* "View.MemoryView":1394
*
* @cname('__pyx_memoryview_slice_assign_scalar')
* cdef void slice_assign_scalar(__Pyx_memviewslice *dst, int ndim, # <<<<<<<<<<<<<<
* size_t itemsize, void *item,
* bint dtype_is_object) nogil:
*/
/* function exit code */
}
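/* slice_assign_scalar: fills every element of `dst` with the itemsize bytes
 * at `item`. The refcount_copying bracket is what makes this safe for object
 * dtypes: old references are released first, the pointer bytes are blitted
 * by _slice_assign_scalar, and the freshly stored object is INCREF'd once
 * per element afterwards.
 */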
/* "View.MemoryView":1404
*
* @cname('__pyx_memoryview__slice_assign_scalar')
* cdef void _slice_assign_scalar(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<<
* Py_ssize_t *strides, int ndim,
* size_t itemsize, void *item) nogil:
*/
static void __pyx_memoryview__slice_assign_scalar(char *__pyx_v_data, Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, int __pyx_v_ndim, size_t __pyx_v_itemsize, void *__pyx_v_item) {
CYTHON_UNUSED Py_ssize_t __pyx_v_i;
Py_ssize_t __pyx_v_stride;
Py_ssize_t __pyx_v_extent;
int __pyx_t_1;
Py_ssize_t __pyx_t_2;
Py_ssize_t __pyx_t_3;
Py_ssize_t __pyx_t_4;
/* "View.MemoryView":1408
* size_t itemsize, void *item) nogil:
* cdef Py_ssize_t i
* cdef Py_ssize_t stride = strides[0] # <<<<<<<<<<<<<<
* cdef Py_ssize_t extent = shape[0]
*
*/
__pyx_v_stride = (__pyx_v_strides[0]);
/* "View.MemoryView":1409
* cdef Py_ssize_t i
* cdef Py_ssize_t stride = strides[0]
* cdef Py_ssize_t extent = shape[0] # <<<<<<<<<<<<<<
*
* if ndim == 1:
*/
__pyx_v_extent = (__pyx_v_shape[0]);
/* "View.MemoryView":1411
* cdef Py_ssize_t extent = shape[0]
*
* if ndim == 1: # <<<<<<<<<<<<<<
* for i in range(extent):
* memcpy(data, item, itemsize)
*/
__pyx_t_1 = ((__pyx_v_ndim == 1) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":1412
*
* if ndim == 1:
* for i in range(extent): # <<<<<<<<<<<<<<
* memcpy(data, item, itemsize)
* data += stride
*/
__pyx_t_2 = __pyx_v_extent;
__pyx_t_3 = __pyx_t_2;
for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) {
__pyx_v_i = __pyx_t_4;
/* "View.MemoryView":1413
* if ndim == 1:
* for i in range(extent):
* memcpy(data, item, itemsize) # <<<<<<<<<<<<<<
* data += stride
* else:
*/
(void)(memcpy(__pyx_v_data, __pyx_v_item, __pyx_v_itemsize));
/* "View.MemoryView":1414
* for i in range(extent):
* memcpy(data, item, itemsize)
* data += stride # <<<<<<<<<<<<<<
* else:
* for i in range(extent):
*/
__pyx_v_data = (__pyx_v_data + __pyx_v_stride);
}
/* "View.MemoryView":1411
* cdef Py_ssize_t extent = shape[0]
*
* if ndim == 1: # <<<<<<<<<<<<<<
* for i in range(extent):
* memcpy(data, item, itemsize)
*/
goto __pyx_L3;
}
/* "View.MemoryView":1416
* data += stride
* else:
* for i in range(extent): # <<<<<<<<<<<<<<
* _slice_assign_scalar(data, shape + 1, strides + 1,
* ndim - 1, itemsize, item)
*/
/*else*/ {
__pyx_t_2 = __pyx_v_extent;
__pyx_t_3 = __pyx_t_2;
for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) {
__pyx_v_i = __pyx_t_4;
/* "View.MemoryView":1417
* else:
* for i in range(extent):
* _slice_assign_scalar(data, shape + 1, strides + 1, # <<<<<<<<<<<<<<
* ndim - 1, itemsize, item)
* data += stride
*/
__pyx_memoryview__slice_assign_scalar(__pyx_v_data, (__pyx_v_shape + 1), (__pyx_v_strides + 1), (__pyx_v_ndim - 1), __pyx_v_itemsize, __pyx_v_item);
/* "View.MemoryView":1419
* _slice_assign_scalar(data, shape + 1, strides + 1,
* ndim - 1, itemsize, item)
* data += stride # <<<<<<<<<<<<<<
*
*
*/
__pyx_v_data = (__pyx_v_data + __pyx_v_stride);
}
}
__pyx_L3:;
/* "View.MemoryView":1404
*
* @cname('__pyx_memoryview__slice_assign_scalar')
* cdef void _slice_assign_scalar(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<<
* Py_ssize_t *strides, int ndim,
* size_t itemsize, void *item) nogil:
*/
/* function exit code */
}
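/* _slice_assign_scalar: the nogil worker for the scalar fill. At ndim == 1
 * it memcpy's `item` into each of `extent` positions, stepping by the
 * dimension's stride; for higher ndim it recurses into each sub-slice with
 * shape + 1 / strides + 1. A {2, 3} slice therefore issues six element-sized
 * memcpy calls.
 */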
/* "(tree fragment)":1
* def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<<
* cdef object __pyx_PickleError
* cdef object __pyx_result
*/
/* Python wrapper */
static PyObject *__pyx_pw_15View_dot_MemoryView_1__pyx_unpickle_Enum(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static PyMethodDef __pyx_mdef_15View_dot_MemoryView_1__pyx_unpickle_Enum = {"__pyx_unpickle_Enum", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_15View_dot_MemoryView_1__pyx_unpickle_Enum, METH_VARARGS|METH_KEYWORDS, 0};
static PyObject *__pyx_pw_15View_dot_MemoryView_1__pyx_unpickle_Enum(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
PyObject *__pyx_v___pyx_type = 0;
long __pyx_v___pyx_checksum;
PyObject *__pyx_v___pyx_state = 0;
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__pyx_unpickle_Enum (wrapper)", 0);
{
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_pyx_type,&__pyx_n_s_pyx_checksum,&__pyx_n_s_pyx_state,0};
PyObject* values[3] = {0,0,0};
if (unlikely(__pyx_kwds)) {
Py_ssize_t kw_args;
const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
switch (pos_args) {
case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
CYTHON_FALLTHROUGH;
case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
CYTHON_FALLTHROUGH;
case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
CYTHON_FALLTHROUGH;
case 0: break;
default: goto __pyx_L5_argtuple_error;
}
kw_args = PyDict_Size(__pyx_kwds);
switch (pos_args) {
case 0:
if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_pyx_type)) != 0)) kw_args--;
else goto __pyx_L5_argtuple_error;
CYTHON_FALLTHROUGH;
case 1:
if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_pyx_checksum)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("__pyx_unpickle_Enum", 1, 3, 3, 1); __PYX_ERR(1, 1, __pyx_L3_error)
}
CYTHON_FALLTHROUGH;
case 2:
if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_pyx_state)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("__pyx_unpickle_Enum", 1, 3, 3, 2); __PYX_ERR(1, 1, __pyx_L3_error)
}
}
if (unlikely(kw_args > 0)) {
if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__pyx_unpickle_Enum") < 0)) __PYX_ERR(1, 1, __pyx_L3_error)
}
} else if (PyTuple_GET_SIZE(__pyx_args) != 3) {
goto __pyx_L5_argtuple_error;
} else {
values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
}
__pyx_v___pyx_type = values[0];
__pyx_v___pyx_checksum = __Pyx_PyInt_As_long(values[1]); if (unlikely((__pyx_v___pyx_checksum == (long)-1) && PyErr_Occurred())) __PYX_ERR(1, 1, __pyx_L3_error)
__pyx_v___pyx_state = values[2];
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
__Pyx_RaiseArgtupleInvalid("__pyx_unpickle_Enum", 1, 3, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 1, __pyx_L3_error)
__pyx_L3_error:;
__Pyx_AddTraceback("View.MemoryView.__pyx_unpickle_Enum", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
return NULL;
__pyx_L4_argument_unpacking_done:;
__pyx_r = __pyx_pf_15View_dot_MemoryView___pyx_unpickle_Enum(__pyx_self, __pyx_v___pyx_type, __pyx_v___pyx_checksum, __pyx_v___pyx_state);
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_15View_dot_MemoryView___pyx_unpickle_Enum(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v___pyx_type, long __pyx_v___pyx_checksum, PyObject *__pyx_v___pyx_state) {
PyObject *__pyx_v___pyx_PickleError = 0;
PyObject *__pyx_v___pyx_result = 0;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
int __pyx_t_6;
__Pyx_RefNannySetupContext("__pyx_unpickle_Enum", 0);
/* "(tree fragment)":4
* cdef object __pyx_PickleError
* cdef object __pyx_result
* if __pyx_checksum != 0xb068931: # <<<<<<<<<<<<<<
* from pickle import PickleError as __pyx_PickleError
* raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum)
*/
__pyx_t_1 = ((__pyx_v___pyx_checksum != 0xb068931) != 0);
if (__pyx_t_1) {
/* "(tree fragment)":5
* cdef object __pyx_result
* if __pyx_checksum != 0xb068931:
* from pickle import PickleError as __pyx_PickleError # <<<<<<<<<<<<<<
* raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum)
* __pyx_result = Enum.__new__(__pyx_type)
*/
__pyx_t_2 = PyList_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 5, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_INCREF(__pyx_n_s_PickleError);
__Pyx_GIVEREF(__pyx_n_s_PickleError);
PyList_SET_ITEM(__pyx_t_2, 0, __pyx_n_s_PickleError);
__pyx_t_3 = __Pyx_Import(__pyx_n_s_pickle, __pyx_t_2, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 5, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_t_2 = __Pyx_ImportFrom(__pyx_t_3, __pyx_n_s_PickleError); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 5, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_INCREF(__pyx_t_2);
__pyx_v___pyx_PickleError = __pyx_t_2;
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
/* "(tree fragment)":6
* if __pyx_checksum != 0xb068931:
* from pickle import PickleError as __pyx_PickleError
* raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) # <<<<<<<<<<<<<<
* __pyx_result = Enum.__new__(__pyx_type)
* if __pyx_state is not None:
*/
__pyx_t_2 = __Pyx_PyInt_From_long(__pyx_v___pyx_checksum); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 6, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_4 = __Pyx_PyString_Format(__pyx_kp_s_Incompatible_checksums_s_vs_0xb0, __pyx_t_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 6, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_INCREF(__pyx_v___pyx_PickleError);
__pyx_t_2 = __pyx_v___pyx_PickleError; __pyx_t_5 = NULL;
if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_2))) {
__pyx_t_5 = PyMethod_GET_SELF(__pyx_t_2);
if (likely(__pyx_t_5)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2);
__Pyx_INCREF(__pyx_t_5);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_2, function);
}
}
__pyx_t_3 = (__pyx_t_5) ? __Pyx_PyObject_Call2Args(__pyx_t_2, __pyx_t_5, __pyx_t_4) : __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_t_4);
__Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 6, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__PYX_ERR(1, 6, __pyx_L1_error)
/* "(tree fragment)":4
* cdef object __pyx_PickleError
* cdef object __pyx_result
* if __pyx_checksum != 0xb068931: # <<<<<<<<<<<<<<
* from pickle import PickleError as __pyx_PickleError
* raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum)
*/
}
/* "(tree fragment)":7
* from pickle import PickleError as __pyx_PickleError
* raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum)
* __pyx_result = Enum.__new__(__pyx_type) # <<<<<<<<<<<<<<
* if __pyx_state is not None:
* __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state)
*/
__pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_MemviewEnum_type), __pyx_n_s_new); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 7, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_4 = NULL;
if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_2))) {
__pyx_t_4 = PyMethod_GET_SELF(__pyx_t_2);
if (likely(__pyx_t_4)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2);
__Pyx_INCREF(__pyx_t_4);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_2, function);
}
}
__pyx_t_3 = (__pyx_t_4) ? __Pyx_PyObject_Call2Args(__pyx_t_2, __pyx_t_4, __pyx_v___pyx_type) : __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_v___pyx_type);
__Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0;
if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 7, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_v___pyx_result = __pyx_t_3;
__pyx_t_3 = 0;
/* "(tree fragment)":8
* raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum)
* __pyx_result = Enum.__new__(__pyx_type)
* if __pyx_state is not None: # <<<<<<<<<<<<<<
* __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state)
* return __pyx_result
*/
__pyx_t_1 = (__pyx_v___pyx_state != Py_None);
__pyx_t_6 = (__pyx_t_1 != 0);
if (__pyx_t_6) {
/* "(tree fragment)":9
* __pyx_result = Enum.__new__(__pyx_type)
* if __pyx_state is not None:
* __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state) # <<<<<<<<<<<<<<
* return __pyx_result
* cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state):
*/
if (!(likely(PyTuple_CheckExact(__pyx_v___pyx_state))||((__pyx_v___pyx_state) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "tuple", Py_TYPE(__pyx_v___pyx_state)->tp_name), 0))) __PYX_ERR(1, 9, __pyx_L1_error)
__pyx_t_3 = __pyx_unpickle_Enum__set_state(((struct __pyx_MemviewEnum_obj *)__pyx_v___pyx_result), ((PyObject*)__pyx_v___pyx_state)); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 9, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
/* "(tree fragment)":8
* raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum)
* __pyx_result = Enum.__new__(__pyx_type)
* if __pyx_state is not None: # <<<<<<<<<<<<<<
* __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state)
* return __pyx_result
*/
}
/* "(tree fragment)":10
* if __pyx_state is not None:
* __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state)
* return __pyx_result # <<<<<<<<<<<<<<
* cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state):
* __pyx_result.name = __pyx_state[0]
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(__pyx_v___pyx_result);
__pyx_r = __pyx_v___pyx_result;
goto __pyx_L0;
/* "(tree fragment)":1
* def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<<
* cdef object __pyx_PickleError
* cdef object __pyx_result
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_AddTraceback("View.MemoryView.__pyx_unpickle_Enum", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v___pyx_PickleError);
__Pyx_XDECREF(__pyx_v___pyx_result);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
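/* __pyx_unpickle_Enum: module-level helper registered for unpickling the
 * internal Enum sentinel type. The wrapper above parses (__pyx_type,
 * __pyx_checksum, __pyx_state) from positional or keyword arguments; the
 * implementation then rejects any checksum other than 0xb068931 (per the
 * quoted source, the checksum of the "(name)" attribute layout) with
 * pickle.PickleError, creates the instance via Enum.__new__(__pyx_type), and
 * applies the pickled state through __pyx_unpickle_Enum__set_state below.
 */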
/* "(tree fragment)":11
* __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state)
* return __pyx_result
* cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): # <<<<<<<<<<<<<<
* __pyx_result.name = __pyx_state[0]
* if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'):
*/
static PyObject *__pyx_unpickle_Enum__set_state(struct __pyx_MemviewEnum_obj *__pyx_v___pyx_result, PyObject *__pyx_v___pyx_state) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_t_2;
Py_ssize_t __pyx_t_3;
int __pyx_t_4;
int __pyx_t_5;
PyObject *__pyx_t_6 = NULL;
PyObject *__pyx_t_7 = NULL;
PyObject *__pyx_t_8 = NULL;
__Pyx_RefNannySetupContext("__pyx_unpickle_Enum__set_state", 0);
/* "(tree fragment)":12
* return __pyx_result
* cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state):
* __pyx_result.name = __pyx_state[0] # <<<<<<<<<<<<<<
* if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'):
* __pyx_result.__dict__.update(__pyx_state[1])
*/
if (unlikely(__pyx_v___pyx_state == Py_None)) {
PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable");
__PYX_ERR(1, 12, __pyx_L1_error)
}
__pyx_t_1 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 12, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_GIVEREF(__pyx_t_1);
__Pyx_GOTREF(__pyx_v___pyx_result->name);
__Pyx_DECREF(__pyx_v___pyx_result->name);
__pyx_v___pyx_result->name = __pyx_t_1;
__pyx_t_1 = 0;
/* "(tree fragment)":13
* cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state):
* __pyx_result.name = __pyx_state[0]
* if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): # <<<<<<<<<<<<<<
* __pyx_result.__dict__.update(__pyx_state[1])
*/
if (unlikely(__pyx_v___pyx_state == Py_None)) {
PyErr_SetString(PyExc_TypeError, "object of type 'NoneType' has no len()");
__PYX_ERR(1, 13, __pyx_L1_error)
}
__pyx_t_3 = PyTuple_GET_SIZE(__pyx_v___pyx_state); if (unlikely(__pyx_t_3 == ((Py_ssize_t)-1))) __PYX_ERR(1, 13, __pyx_L1_error)
__pyx_t_4 = ((__pyx_t_3 > 1) != 0);
if (__pyx_t_4) {
} else {
__pyx_t_2 = __pyx_t_4;
goto __pyx_L4_bool_binop_done;
}
__pyx_t_4 = __Pyx_HasAttr(((PyObject *)__pyx_v___pyx_result), __pyx_n_s_dict); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 13, __pyx_L1_error)
__pyx_t_5 = (__pyx_t_4 != 0);
__pyx_t_2 = __pyx_t_5;
__pyx_L4_bool_binop_done:;
if (__pyx_t_2) {
/* "(tree fragment)":14
* __pyx_result.name = __pyx_state[0]
* if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'):
* __pyx_result.__dict__.update(__pyx_state[1]) # <<<<<<<<<<<<<<
*/
__pyx_t_6 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v___pyx_result), __pyx_n_s_dict); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 14, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_n_s_update); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 14, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_7);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
if (unlikely(__pyx_v___pyx_state == Py_None)) {
PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable");
__PYX_ERR(1, 14, __pyx_L1_error)
}
__pyx_t_6 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 1, long, 1, __Pyx_PyInt_From_long, 0, 0, 0); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 14, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__pyx_t_8 = NULL;
if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_7))) {
__pyx_t_8 = PyMethod_GET_SELF(__pyx_t_7);
if (likely(__pyx_t_8)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_7);
__Pyx_INCREF(__pyx_t_8);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_7, function);
}
}
__pyx_t_1 = (__pyx_t_8) ? __Pyx_PyObject_Call2Args(__pyx_t_7, __pyx_t_8, __pyx_t_6) : __Pyx_PyObject_CallOneArg(__pyx_t_7, __pyx_t_6);
__Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0;
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 14, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "(tree fragment)":13
* cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state):
* __pyx_result.name = __pyx_state[0]
* if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): # <<<<<<<<<<<<<<
* __pyx_result.__dict__.update(__pyx_state[1])
*/
}
/* "(tree fragment)":11
* __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state)
* return __pyx_result
* cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): # <<<<<<<<<<<<<<
* __pyx_result.name = __pyx_state[0]
* if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'):
*/
/* function exit code */
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_6);
__Pyx_XDECREF(__pyx_t_7);
__Pyx_XDECREF(__pyx_t_8);
__Pyx_AddTraceback("View.MemoryView.__pyx_unpickle_Enum__set_state", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
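/* Editorial annotation (added for readability; not part of Cython's generated
 * output): the two functions above are Cython's pickle support for the
 * internal Enum helper class. Reconstructed from the "(tree fragment)"
 * comments interleaved above, their Python equivalent is roughly:
 *
 *     def __pyx_unpickle_Enum(type_, checksum, state):
 *         if checksum != 0xb068931:      # checksum of the ('name',) layout
 *             raise __pyx_PickleError(...)
 *         result = Enum.__new__(type_)
 *         if state is not None:
 *             __pyx_unpickle_Enum__set_state(result, state)
 *         return result
 *
 *     def __pyx_unpickle_Enum__set_state(result, state):
 *         result.name = state[0]
 *         if len(state) > 1 and hasattr(result, '__dict__'):
 *             result.__dict__.update(state[1])
 *
 * Everything from here down to the module-init code is per-type slot
 * boilerplate (tp_new, tp_dealloc, method tables, PyTypeObject) for the
 * helper classes array, Enum, memoryview and _memoryviewslice. */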
static struct __pyx_vtabstruct_array __pyx_vtable_array;
static PyObject *__pyx_tp_new_array(PyTypeObject *t, PyObject *a, PyObject *k) {
struct __pyx_array_obj *p;
PyObject *o;
if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) {
o = (*t->tp_alloc)(t, 0);
} else {
o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0);
}
if (unlikely(!o)) return 0;
p = ((struct __pyx_array_obj *)o);
p->__pyx_vtab = __pyx_vtabptr_array;
p->mode = ((PyObject*)Py_None); Py_INCREF(Py_None);
p->_format = ((PyObject*)Py_None); Py_INCREF(Py_None);
if (unlikely(__pyx_array___cinit__(o, a, k) < 0)) goto bad;
return o;
bad:
Py_DECREF(o); o = 0;
return NULL;
}
static void __pyx_tp_dealloc_array(PyObject *o) {
struct __pyx_array_obj *p = (struct __pyx_array_obj *)o;
#if CYTHON_USE_TP_FINALIZE
if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && (!PyType_IS_GC(Py_TYPE(o)) || !_PyGC_FINALIZED(o))) {
if (PyObject_CallFinalizerFromDealloc(o)) return;
}
#endif
{
PyObject *etype, *eval, *etb;
PyErr_Fetch(&etype, &eval, &etb);
++Py_REFCNT(o);
__pyx_array___dealloc__(o);
--Py_REFCNT(o);
PyErr_Restore(etype, eval, etb);
}
Py_CLEAR(p->mode);
Py_CLEAR(p->_format);
(*Py_TYPE(o)->tp_free)(o);
}
static PyObject *__pyx_sq_item_array(PyObject *o, Py_ssize_t i) {
PyObject *r;
PyObject *x = PyInt_FromSsize_t(i); if(!x) return 0;
r = Py_TYPE(o)->tp_as_mapping->mp_subscript(o, x);
Py_DECREF(x);
return r;
}
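/* Editorial annotation: __pyx_sq_item_array is a small shim so that
 * sequence-style integer indexing (the sq_item slot, used e.g. by
 * PySequence_GetItem) is routed through the richer mapping protocol: it
 * boxes the Py_ssize_t index into a Python int and forwards to the
 * mp_subscript slot installed in __pyx_tp_as_mapping_array below. */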
static int __pyx_mp_ass_subscript_array(PyObject *o, PyObject *i, PyObject *v) {
if (v) {
return __pyx_array___setitem__(o, i, v);
}
else {
PyErr_Format(PyExc_NotImplementedError,
"Subscript deletion not supported by %.200s", Py_TYPE(o)->tp_name);
return -1;
}
}
static PyObject *__pyx_tp_getattro_array(PyObject *o, PyObject *n) {
PyObject *v = __Pyx_PyObject_GenericGetAttr(o, n);
if (!v && PyErr_ExceptionMatches(PyExc_AttributeError)) {
PyErr_Clear();
v = __pyx_array___getattr__(o, n);
}
return v;
}
static PyObject *__pyx_getprop___pyx_array_memview(PyObject *o, CYTHON_UNUSED void *x) {
return __pyx_pw_15View_dot_MemoryView_5array_7memview_1__get__(o);
}
static PyMethodDef __pyx_methods_array[] = {
{"__getattr__", (PyCFunction)__pyx_array___getattr__, METH_O|METH_COEXIST, 0},
{"__reduce_cython__", (PyCFunction)__pyx_pw___pyx_array_1__reduce_cython__, METH_NOARGS, 0},
{"__setstate_cython__", (PyCFunction)__pyx_pw___pyx_array_3__setstate_cython__, METH_O, 0},
{0, 0, 0, 0}
};
static struct PyGetSetDef __pyx_getsets_array[] = {
{(char *)"memview", __pyx_getprop___pyx_array_memview, 0, (char *)0, 0},
{0, 0, 0, 0, 0}
};
static PySequenceMethods __pyx_tp_as_sequence_array = {
__pyx_array___len__, /*sq_length*/
0, /*sq_concat*/
0, /*sq_repeat*/
__pyx_sq_item_array, /*sq_item*/
0, /*sq_slice*/
0, /*sq_ass_item*/
0, /*sq_ass_slice*/
0, /*sq_contains*/
0, /*sq_inplace_concat*/
0, /*sq_inplace_repeat*/
};
static PyMappingMethods __pyx_tp_as_mapping_array = {
__pyx_array___len__, /*mp_length*/
__pyx_array___getitem__, /*mp_subscript*/
__pyx_mp_ass_subscript_array, /*mp_ass_subscript*/
};
static PyBufferProcs __pyx_tp_as_buffer_array = {
#if PY_MAJOR_VERSION < 3
0, /*bf_getreadbuffer*/
#endif
#if PY_MAJOR_VERSION < 3
0, /*bf_getwritebuffer*/
#endif
#if PY_MAJOR_VERSION < 3
0, /*bf_getsegcount*/
#endif
#if PY_MAJOR_VERSION < 3
0, /*bf_getcharbuffer*/
#endif
__pyx_array_getbuffer, /*bf_getbuffer*/
0, /*bf_releasebuffer*/
};
static PyTypeObject __pyx_type___pyx_array = {
PyVarObject_HEAD_INIT(0, 0)
"glove.glove_cython.array", /*tp_name*/
sizeof(struct __pyx_array_obj), /*tp_basicsize*/
0, /*tp_itemsize*/
__pyx_tp_dealloc_array, /*tp_dealloc*/
0, /*tp_print*/
0, /*tp_getattr*/
0, /*tp_setattr*/
#if PY_MAJOR_VERSION < 3
0, /*tp_compare*/
#endif
#if PY_MAJOR_VERSION >= 3
0, /*tp_as_async*/
#endif
0, /*tp_repr*/
0, /*tp_as_number*/
&__pyx_tp_as_sequence_array, /*tp_as_sequence*/
&__pyx_tp_as_mapping_array, /*tp_as_mapping*/
0, /*tp_hash*/
0, /*tp_call*/
0, /*tp_str*/
__pyx_tp_getattro_array, /*tp_getattro*/
0, /*tp_setattro*/
&__pyx_tp_as_buffer_array, /*tp_as_buffer*/
Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE, /*tp_flags*/
0, /*tp_doc*/
0, /*tp_traverse*/
0, /*tp_clear*/
0, /*tp_richcompare*/
0, /*tp_weaklistoffset*/
0, /*tp_iter*/
0, /*tp_iternext*/
__pyx_methods_array, /*tp_methods*/
0, /*tp_members*/
__pyx_getsets_array, /*tp_getset*/
0, /*tp_base*/
0, /*tp_dict*/
0, /*tp_descr_get*/
0, /*tp_descr_set*/
0, /*tp_dictoffset*/
0, /*tp_init*/
0, /*tp_alloc*/
__pyx_tp_new_array, /*tp_new*/
0, /*tp_free*/
0, /*tp_is_gc*/
0, /*tp_bases*/
0, /*tp_mro*/
0, /*tp_cache*/
0, /*tp_subclasses*/
0, /*tp_weaklist*/
0, /*tp_del*/
0, /*tp_version_tag*/
#if PY_VERSION_HEX >= 0x030400a1
0, /*tp_finalize*/
#endif
};
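/* Editorial annotation: next come the slots for the internal Enum class
 * (View.MemoryView's MemviewEnum). Its instances are the layout sentinel
 * objects created later in __Pyx_InitCachedConstants, such as
 * "<strided and direct>" and "<contiguous and indirect>". Unlike `array`,
 * this type participates in cyclic GC: Py_TPFLAGS_HAVE_GC is set and
 * tp_traverse/tp_clear manage the single owned `name` reference. */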
static PyObject *__pyx_tp_new_Enum(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) {
struct __pyx_MemviewEnum_obj *p;
PyObject *o;
if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) {
o = (*t->tp_alloc)(t, 0);
} else {
o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0);
}
if (unlikely(!o)) return 0;
p = ((struct __pyx_MemviewEnum_obj *)o);
p->name = Py_None; Py_INCREF(Py_None);
return o;
}
static void __pyx_tp_dealloc_Enum(PyObject *o) {
struct __pyx_MemviewEnum_obj *p = (struct __pyx_MemviewEnum_obj *)o;
#if CYTHON_USE_TP_FINALIZE
if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) {
if (PyObject_CallFinalizerFromDealloc(o)) return;
}
#endif
PyObject_GC_UnTrack(o);
Py_CLEAR(p->name);
(*Py_TYPE(o)->tp_free)(o);
}
static int __pyx_tp_traverse_Enum(PyObject *o, visitproc v, void *a) {
int e;
struct __pyx_MemviewEnum_obj *p = (struct __pyx_MemviewEnum_obj *)o;
if (p->name) {
e = (*v)(p->name, a); if (e) return e;
}
return 0;
}
static int __pyx_tp_clear_Enum(PyObject *o) {
PyObject* tmp;
struct __pyx_MemviewEnum_obj *p = (struct __pyx_MemviewEnum_obj *)o;
tmp = ((PyObject*)p->name);
p->name = Py_None; Py_INCREF(Py_None);
Py_XDECREF(tmp);
return 0;
}
static PyMethodDef __pyx_methods_Enum[] = {
{"__reduce_cython__", (PyCFunction)__pyx_pw___pyx_MemviewEnum_1__reduce_cython__, METH_NOARGS, 0},
{"__setstate_cython__", (PyCFunction)__pyx_pw___pyx_MemviewEnum_3__setstate_cython__, METH_O, 0},
{0, 0, 0, 0}
};
static PyTypeObject __pyx_type___pyx_MemviewEnum = {
PyVarObject_HEAD_INIT(0, 0)
"glove.glove_cython.Enum", /*tp_name*/
sizeof(struct __pyx_MemviewEnum_obj), /*tp_basicsize*/
0, /*tp_itemsize*/
__pyx_tp_dealloc_Enum, /*tp_dealloc*/
0, /*tp_print*/
0, /*tp_getattr*/
0, /*tp_setattr*/
#if PY_MAJOR_VERSION < 3
0, /*tp_compare*/
#endif
#if PY_MAJOR_VERSION >= 3
0, /*tp_as_async*/
#endif
__pyx_MemviewEnum___repr__, /*tp_repr*/
0, /*tp_as_number*/
0, /*tp_as_sequence*/
0, /*tp_as_mapping*/
0, /*tp_hash*/
0, /*tp_call*/
0, /*tp_str*/
0, /*tp_getattro*/
0, /*tp_setattro*/
0, /*tp_as_buffer*/
Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
0, /*tp_doc*/
__pyx_tp_traverse_Enum, /*tp_traverse*/
__pyx_tp_clear_Enum, /*tp_clear*/
0, /*tp_richcompare*/
0, /*tp_weaklistoffset*/
0, /*tp_iter*/
0, /*tp_iternext*/
__pyx_methods_Enum, /*tp_methods*/
0, /*tp_members*/
0, /*tp_getset*/
0, /*tp_base*/
0, /*tp_dict*/
0, /*tp_descr_get*/
0, /*tp_descr_set*/
0, /*tp_dictoffset*/
__pyx_MemviewEnum___init__, /*tp_init*/
0, /*tp_alloc*/
__pyx_tp_new_Enum, /*tp_new*/
0, /*tp_free*/
0, /*tp_is_gc*/
0, /*tp_bases*/
0, /*tp_mro*/
0, /*tp_cache*/
0, /*tp_subclasses*/
0, /*tp_weaklist*/
0, /*tp_del*/
0, /*tp_version_tag*/
#if PY_VERSION_HEX >= 0x030400a1
0, /*tp_finalize*/
#endif
};
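/* Editorial annotation: the memoryview class proper starts here. Its
 * vtable (__pyx_vtable_memoryview, populated later in
 * __Pyx_modinit_type_init_code) carries the C-level helpers for item
 * pointer lookup, slice assignment and item/object conversion; the
 * PyTypeObject below wires up len(), subscripting, repr/str and the
 * buffer protocol on top of them. */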
static struct __pyx_vtabstruct_memoryview __pyx_vtable_memoryview;
static PyObject *__pyx_tp_new_memoryview(PyTypeObject *t, PyObject *a, PyObject *k) {
struct __pyx_memoryview_obj *p;
PyObject *o;
if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) {
o = (*t->tp_alloc)(t, 0);
} else {
o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0);
}
if (unlikely(!o)) return 0;
p = ((struct __pyx_memoryview_obj *)o);
p->__pyx_vtab = __pyx_vtabptr_memoryview;
p->obj = Py_None; Py_INCREF(Py_None);
p->_size = Py_None; Py_INCREF(Py_None);
p->_array_interface = Py_None; Py_INCREF(Py_None);
p->view.obj = NULL;
if (unlikely(__pyx_memoryview___cinit__(o, a, k) < 0)) goto bad;
return o;
bad:
Py_DECREF(o); o = 0;
return NULL;
}
static void __pyx_tp_dealloc_memoryview(PyObject *o) {
struct __pyx_memoryview_obj *p = (struct __pyx_memoryview_obj *)o;
#if CYTHON_USE_TP_FINALIZE
if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) {
if (PyObject_CallFinalizerFromDealloc(o)) return;
}
#endif
PyObject_GC_UnTrack(o);
{
PyObject *etype, *eval, *etb;
PyErr_Fetch(&etype, &eval, &etb);
++Py_REFCNT(o);
__pyx_memoryview___dealloc__(o);
--Py_REFCNT(o);
PyErr_Restore(etype, eval, etb);
}
Py_CLEAR(p->obj);
Py_CLEAR(p->_size);
Py_CLEAR(p->_array_interface);
(*Py_TYPE(o)->tp_free)(o);
}
static int __pyx_tp_traverse_memoryview(PyObject *o, visitproc v, void *a) {
int e;
struct __pyx_memoryview_obj *p = (struct __pyx_memoryview_obj *)o;
if (p->obj) {
e = (*v)(p->obj, a); if (e) return e;
}
if (p->_size) {
e = (*v)(p->_size, a); if (e) return e;
}
if (p->_array_interface) {
e = (*v)(p->_array_interface, a); if (e) return e;
}
if (p->view.obj) {
e = (*v)(p->view.obj, a); if (e) return e;
}
return 0;
}
static int __pyx_tp_clear_memoryview(PyObject *o) {
PyObject* tmp;
struct __pyx_memoryview_obj *p = (struct __pyx_memoryview_obj *)o;
tmp = ((PyObject*)p->obj);
p->obj = Py_None; Py_INCREF(Py_None);
Py_XDECREF(tmp);
tmp = ((PyObject*)p->_size);
p->_size = Py_None; Py_INCREF(Py_None);
Py_XDECREF(tmp);
tmp = ((PyObject*)p->_array_interface);
p->_array_interface = Py_None; Py_INCREF(Py_None);
Py_XDECREF(tmp);
Py_CLEAR(p->view.obj);
return 0;
}
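/* Editorial annotation: the tp_traverse/tp_clear pair above follows the
 * usual CPython GC contract. traverse visits every owned PyObject*,
 * including the Py_buffer back-reference view.obj; clear drops them, but
 * note the asymmetry: obj, _size and _array_interface are replaced with
 * Py_None (so later slot code never sees a NULL field), while view.obj is
 * simply Py_CLEARed to NULL as the buffer protocol expects. */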
static PyObject *__pyx_sq_item_memoryview(PyObject *o, Py_ssize_t i) {
PyObject *r;
PyObject *x = PyInt_FromSsize_t(i); if(!x) return 0;
r = Py_TYPE(o)->tp_as_mapping->mp_subscript(o, x);
Py_DECREF(x);
return r;
}
static int __pyx_mp_ass_subscript_memoryview(PyObject *o, PyObject *i, PyObject *v) {
if (v) {
return __pyx_memoryview___setitem__(o, i, v);
}
else {
PyErr_Format(PyExc_NotImplementedError,
"Subscript deletion not supported by %.200s", Py_TYPE(o)->tp_name);
return -1;
}
}
static PyObject *__pyx_getprop___pyx_memoryview_T(PyObject *o, CYTHON_UNUSED void *x) {
return __pyx_pw_15View_dot_MemoryView_10memoryview_1T_1__get__(o);
}
static PyObject *__pyx_getprop___pyx_memoryview_base(PyObject *o, CYTHON_UNUSED void *x) {
return __pyx_pw_15View_dot_MemoryView_10memoryview_4base_1__get__(o);
}
static PyObject *__pyx_getprop___pyx_memoryview_shape(PyObject *o, CYTHON_UNUSED void *x) {
return __pyx_pw_15View_dot_MemoryView_10memoryview_5shape_1__get__(o);
}
static PyObject *__pyx_getprop___pyx_memoryview_strides(PyObject *o, CYTHON_UNUSED void *x) {
return __pyx_pw_15View_dot_MemoryView_10memoryview_7strides_1__get__(o);
}
static PyObject *__pyx_getprop___pyx_memoryview_suboffsets(PyObject *o, CYTHON_UNUSED void *x) {
return __pyx_pw_15View_dot_MemoryView_10memoryview_10suboffsets_1__get__(o);
}
static PyObject *__pyx_getprop___pyx_memoryview_ndim(PyObject *o, CYTHON_UNUSED void *x) {
return __pyx_pw_15View_dot_MemoryView_10memoryview_4ndim_1__get__(o);
}
static PyObject *__pyx_getprop___pyx_memoryview_itemsize(PyObject *o, CYTHON_UNUSED void *x) {
return __pyx_pw_15View_dot_MemoryView_10memoryview_8itemsize_1__get__(o);
}
static PyObject *__pyx_getprop___pyx_memoryview_nbytes(PyObject *o, CYTHON_UNUSED void *x) {
return __pyx_pw_15View_dot_MemoryView_10memoryview_6nbytes_1__get__(o);
}
static PyObject *__pyx_getprop___pyx_memoryview_size(PyObject *o, CYTHON_UNUSED void *x) {
return __pyx_pw_15View_dot_MemoryView_10memoryview_4size_1__get__(o);
}
static PyMethodDef __pyx_methods_memoryview[] = {
{"is_c_contig", (PyCFunction)__pyx_memoryview_is_c_contig, METH_NOARGS, 0},
{"is_f_contig", (PyCFunction)__pyx_memoryview_is_f_contig, METH_NOARGS, 0},
{"copy", (PyCFunction)__pyx_memoryview_copy, METH_NOARGS, 0},
{"copy_fortran", (PyCFunction)__pyx_memoryview_copy_fortran, METH_NOARGS, 0},
{"__reduce_cython__", (PyCFunction)__pyx_pw___pyx_memoryview_1__reduce_cython__, METH_NOARGS, 0},
{"__setstate_cython__", (PyCFunction)__pyx_pw___pyx_memoryview_3__setstate_cython__, METH_O, 0},
{0, 0, 0, 0}
};
static struct PyGetSetDef __pyx_getsets_memoryview[] = {
{(char *)"T", __pyx_getprop___pyx_memoryview_T, 0, (char *)0, 0},
{(char *)"base", __pyx_getprop___pyx_memoryview_base, 0, (char *)0, 0},
{(char *)"shape", __pyx_getprop___pyx_memoryview_shape, 0, (char *)0, 0},
{(char *)"strides", __pyx_getprop___pyx_memoryview_strides, 0, (char *)0, 0},
{(char *)"suboffsets", __pyx_getprop___pyx_memoryview_suboffsets, 0, (char *)0, 0},
{(char *)"ndim", __pyx_getprop___pyx_memoryview_ndim, 0, (char *)0, 0},
{(char *)"itemsize", __pyx_getprop___pyx_memoryview_itemsize, 0, (char *)0, 0},
{(char *)"nbytes", __pyx_getprop___pyx_memoryview_nbytes, 0, (char *)0, 0},
{(char *)"size", __pyx_getprop___pyx_memoryview_size, 0, (char *)0, 0},
{0, 0, 0, 0, 0}
};
static PySequenceMethods __pyx_tp_as_sequence_memoryview = {
__pyx_memoryview___len__, /*sq_length*/
0, /*sq_concat*/
0, /*sq_repeat*/
__pyx_sq_item_memoryview, /*sq_item*/
0, /*sq_slice*/
0, /*sq_ass_item*/
0, /*sq_ass_slice*/
0, /*sq_contains*/
0, /*sq_inplace_concat*/
0, /*sq_inplace_repeat*/
};
static PyMappingMethods __pyx_tp_as_mapping_memoryview = {
__pyx_memoryview___len__, /*mp_length*/
__pyx_memoryview___getitem__, /*mp_subscript*/
__pyx_mp_ass_subscript_memoryview, /*mp_ass_subscript*/
};
static PyBufferProcs __pyx_tp_as_buffer_memoryview = {
#if PY_MAJOR_VERSION < 3
0, /*bf_getreadbuffer*/
#endif
#if PY_MAJOR_VERSION < 3
0, /*bf_getwritebuffer*/
#endif
#if PY_MAJOR_VERSION < 3
0, /*bf_getsegcount*/
#endif
#if PY_MAJOR_VERSION < 3
0, /*bf_getcharbuffer*/
#endif
__pyx_memoryview_getbuffer, /*bf_getbuffer*/
0, /*bf_releasebuffer*/
};
static PyTypeObject __pyx_type___pyx_memoryview = {
PyVarObject_HEAD_INIT(0, 0)
"glove.glove_cython.memoryview", /*tp_name*/
sizeof(struct __pyx_memoryview_obj), /*tp_basicsize*/
0, /*tp_itemsize*/
__pyx_tp_dealloc_memoryview, /*tp_dealloc*/
0, /*tp_print*/
0, /*tp_getattr*/
0, /*tp_setattr*/
#if PY_MAJOR_VERSION < 3
0, /*tp_compare*/
#endif
#if PY_MAJOR_VERSION >= 3
0, /*tp_as_async*/
#endif
__pyx_memoryview___repr__, /*tp_repr*/
0, /*tp_as_number*/
&__pyx_tp_as_sequence_memoryview, /*tp_as_sequence*/
&__pyx_tp_as_mapping_memoryview, /*tp_as_mapping*/
0, /*tp_hash*/
0, /*tp_call*/
__pyx_memoryview___str__, /*tp_str*/
0, /*tp_getattro*/
0, /*tp_setattro*/
&__pyx_tp_as_buffer_memoryview, /*tp_as_buffer*/
Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
0, /*tp_doc*/
__pyx_tp_traverse_memoryview, /*tp_traverse*/
__pyx_tp_clear_memoryview, /*tp_clear*/
0, /*tp_richcompare*/
0, /*tp_weaklistoffset*/
0, /*tp_iter*/
0, /*tp_iternext*/
__pyx_methods_memoryview, /*tp_methods*/
0, /*tp_members*/
__pyx_getsets_memoryview, /*tp_getset*/
0, /*tp_base*/
0, /*tp_dict*/
0, /*tp_descr_get*/
0, /*tp_descr_set*/
0, /*tp_dictoffset*/
0, /*tp_init*/
0, /*tp_alloc*/
__pyx_tp_new_memoryview, /*tp_new*/
0, /*tp_free*/
0, /*tp_is_gc*/
0, /*tp_bases*/
0, /*tp_mro*/
0, /*tp_cache*/
0, /*tp_subclasses*/
0, /*tp_weaklist*/
0, /*tp_del*/
0, /*tp_version_tag*/
#if PY_VERSION_HEX >= 0x030400a1
0, /*tp_finalize*/
#endif
};
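/* Editorial annotation: _memoryviewslice subclasses memoryview (tp_base is
 * assigned in __Pyx_modinit_type_init_code) and adds a `from_object`
 * reference plus an owned C-level from_slice. Its tp_new, tp_dealloc,
 * tp_traverse and tp_clear all delegate to the memoryview versions before
 * or after handling those extras, and the PyPy-only tp_repr/tp_str
 * entries below exist because PyPy does not inherit those slots the way
 * CPython does. */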
static struct __pyx_vtabstruct__memoryviewslice __pyx_vtable__memoryviewslice;
static PyObject *__pyx_tp_new__memoryviewslice(PyTypeObject *t, PyObject *a, PyObject *k) {
struct __pyx_memoryviewslice_obj *p;
PyObject *o = __pyx_tp_new_memoryview(t, a, k);
if (unlikely(!o)) return 0;
p = ((struct __pyx_memoryviewslice_obj *)o);
p->__pyx_base.__pyx_vtab = (struct __pyx_vtabstruct_memoryview*)__pyx_vtabptr__memoryviewslice;
p->from_object = Py_None; Py_INCREF(Py_None);
p->from_slice.memview = NULL;
return o;
}
static void __pyx_tp_dealloc__memoryviewslice(PyObject *o) {
struct __pyx_memoryviewslice_obj *p = (struct __pyx_memoryviewslice_obj *)o;
#if CYTHON_USE_TP_FINALIZE
if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) {
if (PyObject_CallFinalizerFromDealloc(o)) return;
}
#endif
PyObject_GC_UnTrack(o);
{
PyObject *etype, *eval, *etb;
PyErr_Fetch(&etype, &eval, &etb);
++Py_REFCNT(o);
__pyx_memoryviewslice___dealloc__(o);
--Py_REFCNT(o);
PyErr_Restore(etype, eval, etb);
}
Py_CLEAR(p->from_object);
PyObject_GC_Track(o);
__pyx_tp_dealloc_memoryview(o);
}
static int __pyx_tp_traverse__memoryviewslice(PyObject *o, visitproc v, void *a) {
int e;
struct __pyx_memoryviewslice_obj *p = (struct __pyx_memoryviewslice_obj *)o;
e = __pyx_tp_traverse_memoryview(o, v, a); if (e) return e;
if (p->from_object) {
e = (*v)(p->from_object, a); if (e) return e;
}
return 0;
}
static int __pyx_tp_clear__memoryviewslice(PyObject *o) {
PyObject* tmp;
struct __pyx_memoryviewslice_obj *p = (struct __pyx_memoryviewslice_obj *)o;
__pyx_tp_clear_memoryview(o);
tmp = ((PyObject*)p->from_object);
p->from_object = Py_None; Py_INCREF(Py_None);
Py_XDECREF(tmp);
__PYX_XDEC_MEMVIEW(&p->from_slice, 1);
return 0;
}
static PyObject *__pyx_getprop___pyx_memoryviewslice_base(PyObject *o, CYTHON_UNUSED void *x) {
return __pyx_pw_15View_dot_MemoryView_16_memoryviewslice_4base_1__get__(o);
}
static PyMethodDef __pyx_methods__memoryviewslice[] = {
{"__reduce_cython__", (PyCFunction)__pyx_pw___pyx_memoryviewslice_1__reduce_cython__, METH_NOARGS, 0},
{"__setstate_cython__", (PyCFunction)__pyx_pw___pyx_memoryviewslice_3__setstate_cython__, METH_O, 0},
{0, 0, 0, 0}
};
static struct PyGetSetDef __pyx_getsets__memoryviewslice[] = {
{(char *)"base", __pyx_getprop___pyx_memoryviewslice_base, 0, (char *)0, 0},
{0, 0, 0, 0, 0}
};
static PyTypeObject __pyx_type___pyx_memoryviewslice = {
PyVarObject_HEAD_INIT(0, 0)
"glove.glove_cython._memoryviewslice", /*tp_name*/
sizeof(struct __pyx_memoryviewslice_obj), /*tp_basicsize*/
0, /*tp_itemsize*/
__pyx_tp_dealloc__memoryviewslice, /*tp_dealloc*/
0, /*tp_print*/
0, /*tp_getattr*/
0, /*tp_setattr*/
#if PY_MAJOR_VERSION < 3
0, /*tp_compare*/
#endif
#if PY_MAJOR_VERSION >= 3
0, /*tp_as_async*/
#endif
#if CYTHON_COMPILING_IN_PYPY
__pyx_memoryview___repr__, /*tp_repr*/
#else
0, /*tp_repr*/
#endif
0, /*tp_as_number*/
0, /*tp_as_sequence*/
0, /*tp_as_mapping*/
0, /*tp_hash*/
0, /*tp_call*/
#if CYTHON_COMPILING_IN_PYPY
__pyx_memoryview___str__, /*tp_str*/
#else
0, /*tp_str*/
#endif
0, /*tp_getattro*/
0, /*tp_setattro*/
0, /*tp_as_buffer*/
Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
"Internal class for passing memoryview slices to Python", /*tp_doc*/
__pyx_tp_traverse__memoryviewslice, /*tp_traverse*/
__pyx_tp_clear__memoryviewslice, /*tp_clear*/
0, /*tp_richcompare*/
0, /*tp_weaklistoffset*/
0, /*tp_iter*/
0, /*tp_iternext*/
__pyx_methods__memoryviewslice, /*tp_methods*/
0, /*tp_members*/
__pyx_getsets__memoryviewslice, /*tp_getset*/
0, /*tp_base*/
0, /*tp_dict*/
0, /*tp_descr_get*/
0, /*tp_descr_set*/
0, /*tp_dictoffset*/
0, /*tp_init*/
0, /*tp_alloc*/
__pyx_tp_new__memoryviewslice, /*tp_new*/
0, /*tp_free*/
0, /*tp_is_gc*/
0, /*tp_bases*/
0, /*tp_mro*/
0, /*tp_cache*/
0, /*tp_subclasses*/
0, /*tp_weaklist*/
0, /*tp_del*/
0, /*tp_version_tag*/
#if PY_VERSION_HEX >= 0x030400a1
0, /*tp_finalize*/
#endif
};
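/* Editorial annotation: type boilerplate ends here; the remainder of the
 * file is module setup. On Python 3 with CYTHON_PEP489_MULTI_PHASE_INIT
 * the module is created through PEP 489 slots (Py_mod_create and
 * Py_mod_exec, declared just below); otherwise the same __pyx_moduledef
 * is used on the classic single-phase initialisation path. */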
static PyMethodDef __pyx_methods[] = {
{0, 0, 0, 0}
};
#if PY_MAJOR_VERSION >= 3
#if CYTHON_PEP489_MULTI_PHASE_INIT
static PyObject* __pyx_pymod_create(PyObject *spec, PyModuleDef *def); /*proto*/
static int __pyx_pymod_exec_glove_cython(PyObject* module); /*proto*/
static PyModuleDef_Slot __pyx_moduledef_slots[] = {
{Py_mod_create, (void*)__pyx_pymod_create},
{Py_mod_exec, (void*)__pyx_pymod_exec_glove_cython},
{0, NULL}
};
#endif
static struct PyModuleDef __pyx_moduledef = {
PyModuleDef_HEAD_INIT,
"glove_cython",
0, /* m_doc */
#if CYTHON_PEP489_MULTI_PHASE_INIT
0, /* m_size */
#else
-1, /* m_size */
#endif
__pyx_methods /* m_methods */,
#if CYTHON_PEP489_MULTI_PHASE_INIT
__pyx_moduledef_slots, /* m_slots */
#else
NULL, /* m_reload */
#endif
NULL, /* m_traverse */
NULL, /* m_clear */
NULL /* m_free */
};
#endif
#ifndef CYTHON_SMALL_CODE
#if defined(__clang__)
#define CYTHON_SMALL_CODE
#elif defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3))
#define CYTHON_SMALL_CODE __attribute__((cold))
#else
#define CYTHON_SMALL_CODE
#endif
#endif
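/* Editorial annotation: each __Pyx_StringTabEntry below has the shape
 * {&slot, literal, sizeof(literal), encoding, is_unicode, is_str, intern}.
 * __Pyx_InitStrings() walks this table once at import time so that every
 * string constant used by the module (attribute names, error messages,
 * the argument names of fit_vectors, ...) is created, and where requested
 * interned, exactly once. */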
static __Pyx_StringTabEntry __pyx_string_tab[] = {
{&__pyx_n_s_ASCII, __pyx_k_ASCII, sizeof(__pyx_k_ASCII), 0, 0, 1, 1},
{&__pyx_kp_s_Buffer_view_does_not_expose_stri, __pyx_k_Buffer_view_does_not_expose_stri, sizeof(__pyx_k_Buffer_view_does_not_expose_stri), 0, 0, 1, 0},
{&__pyx_kp_s_Can_only_create_a_buffer_that_is, __pyx_k_Can_only_create_a_buffer_that_is, sizeof(__pyx_k_Can_only_create_a_buffer_that_is), 0, 0, 1, 0},
{&__pyx_kp_s_Cannot_assign_to_read_only_memor, __pyx_k_Cannot_assign_to_read_only_memor, sizeof(__pyx_k_Cannot_assign_to_read_only_memor), 0, 0, 1, 0},
{&__pyx_kp_s_Cannot_create_writable_memory_vi, __pyx_k_Cannot_create_writable_memory_vi, sizeof(__pyx_k_Cannot_create_writable_memory_vi), 0, 0, 1, 0},
{&__pyx_kp_s_Cannot_index_with_type_s, __pyx_k_Cannot_index_with_type_s, sizeof(__pyx_k_Cannot_index_with_type_s), 0, 0, 1, 0},
{&__pyx_n_s_Ellipsis, __pyx_k_Ellipsis, sizeof(__pyx_k_Ellipsis), 0, 0, 1, 1},
{&__pyx_kp_s_Empty_shape_tuple_for_cython_arr, __pyx_k_Empty_shape_tuple_for_cython_arr, sizeof(__pyx_k_Empty_shape_tuple_for_cython_arr), 0, 0, 1, 0},
{&__pyx_kp_s_Incompatible_checksums_s_vs_0xb0, __pyx_k_Incompatible_checksums_s_vs_0xb0, sizeof(__pyx_k_Incompatible_checksums_s_vs_0xb0), 0, 0, 1, 0},
{&__pyx_n_s_IndexError, __pyx_k_IndexError, sizeof(__pyx_k_IndexError), 0, 0, 1, 1},
{&__pyx_kp_s_Indirect_dimensions_not_supporte, __pyx_k_Indirect_dimensions_not_supporte, sizeof(__pyx_k_Indirect_dimensions_not_supporte), 0, 0, 1, 0},
{&__pyx_kp_s_Invalid_mode_expected_c_or_fortr, __pyx_k_Invalid_mode_expected_c_or_fortr, sizeof(__pyx_k_Invalid_mode_expected_c_or_fortr), 0, 0, 1, 0},
{&__pyx_kp_s_Invalid_shape_in_axis_d_d, __pyx_k_Invalid_shape_in_axis_d_d, sizeof(__pyx_k_Invalid_shape_in_axis_d_d), 0, 0, 1, 0},
{&__pyx_n_s_MemoryError, __pyx_k_MemoryError, sizeof(__pyx_k_MemoryError), 0, 0, 1, 1},
{&__pyx_kp_s_MemoryView_of_r_at_0x_x, __pyx_k_MemoryView_of_r_at_0x_x, sizeof(__pyx_k_MemoryView_of_r_at_0x_x), 0, 0, 1, 0},
{&__pyx_kp_s_MemoryView_of_r_object, __pyx_k_MemoryView_of_r_object, sizeof(__pyx_k_MemoryView_of_r_object), 0, 0, 1, 0},
{&__pyx_n_b_O, __pyx_k_O, sizeof(__pyx_k_O), 0, 0, 0, 1},
{&__pyx_kp_s_Out_of_bounds_on_buffer_access_a, __pyx_k_Out_of_bounds_on_buffer_access_a, sizeof(__pyx_k_Out_of_bounds_on_buffer_access_a), 0, 0, 1, 0},
{&__pyx_n_s_PickleError, __pyx_k_PickleError, sizeof(__pyx_k_PickleError), 0, 0, 1, 1},
{&__pyx_n_s_TypeError, __pyx_k_TypeError, sizeof(__pyx_k_TypeError), 0, 0, 1, 1},
{&__pyx_kp_s_Unable_to_convert_item_to_object, __pyx_k_Unable_to_convert_item_to_object, sizeof(__pyx_k_Unable_to_convert_item_to_object), 0, 0, 1, 0},
{&__pyx_n_s_ValueError, __pyx_k_ValueError, sizeof(__pyx_k_ValueError), 0, 0, 1, 1},
{&__pyx_n_s_View_MemoryView, __pyx_k_View_MemoryView, sizeof(__pyx_k_View_MemoryView), 0, 0, 1, 1},
{&__pyx_n_s__19, __pyx_k__19, sizeof(__pyx_k__19), 0, 0, 1, 1},
{&__pyx_n_s_allocate_buffer, __pyx_k_allocate_buffer, sizeof(__pyx_k_allocate_buffer), 0, 0, 1, 1},
{&__pyx_n_s_alpha, __pyx_k_alpha, sizeof(__pyx_k_alpha), 0, 0, 1, 1},
{&__pyx_n_s_base, __pyx_k_base, sizeof(__pyx_k_base), 0, 0, 1, 1},
{&__pyx_n_s_c, __pyx_k_c, sizeof(__pyx_k_c), 0, 0, 1, 1},
{&__pyx_n_u_c, __pyx_k_c, sizeof(__pyx_k_c), 0, 1, 0, 1},
{&__pyx_n_s_class, __pyx_k_class, sizeof(__pyx_k_class), 0, 0, 1, 1},
{&__pyx_n_s_cline_in_traceback, __pyx_k_cline_in_traceback, sizeof(__pyx_k_cline_in_traceback), 0, 0, 1, 1},
{&__pyx_n_s_col, __pyx_k_col, sizeof(__pyx_k_col), 0, 0, 1, 1},
{&__pyx_n_s_collections, __pyx_k_collections, sizeof(__pyx_k_collections), 0, 0, 1, 1},
{&__pyx_kp_s_contiguous_and_direct, __pyx_k_contiguous_and_direct, sizeof(__pyx_k_contiguous_and_direct), 0, 0, 1, 0},
{&__pyx_kp_s_contiguous_and_indirect, __pyx_k_contiguous_and_indirect, sizeof(__pyx_k_contiguous_and_indirect), 0, 0, 1, 0},
{&__pyx_n_s_count, __pyx_k_count, sizeof(__pyx_k_count), 0, 0, 1, 1},
{&__pyx_n_s_counts, __pyx_k_counts, sizeof(__pyx_k_counts), 0, 0, 1, 1},
{&__pyx_n_s_dict, __pyx_k_dict, sizeof(__pyx_k_dict), 0, 0, 1, 1},
{&__pyx_n_s_dim, __pyx_k_dim, sizeof(__pyx_k_dim), 0, 0, 1, 1},
{&__pyx_n_s_dtype_is_object, __pyx_k_dtype_is_object, sizeof(__pyx_k_dtype_is_object), 0, 0, 1, 1},
{&__pyx_n_s_encode, __pyx_k_encode, sizeof(__pyx_k_encode), 0, 0, 1, 1},
{&__pyx_n_s_entry_weight, __pyx_k_entry_weight, sizeof(__pyx_k_entry_weight), 0, 0, 1, 1},
{&__pyx_n_s_enumerate, __pyx_k_enumerate, sizeof(__pyx_k_enumerate), 0, 0, 1, 1},
{&__pyx_n_s_error, __pyx_k_error, sizeof(__pyx_k_error), 0, 0, 1, 1},
{&__pyx_n_s_fit_vectors, __pyx_k_fit_vectors, sizeof(__pyx_k_fit_vectors), 0, 0, 1, 1},
{&__pyx_n_s_flags, __pyx_k_flags, sizeof(__pyx_k_flags), 0, 0, 1, 1},
{&__pyx_n_s_format, __pyx_k_format, sizeof(__pyx_k_format), 0, 0, 1, 1},
{&__pyx_n_s_fortran, __pyx_k_fortran, sizeof(__pyx_k_fortran), 0, 0, 1, 1},
{&__pyx_n_u_fortran, __pyx_k_fortran, sizeof(__pyx_k_fortran), 0, 1, 0, 1},
{&__pyx_n_s_getstate, __pyx_k_getstate, sizeof(__pyx_k_getstate), 0, 0, 1, 1},
{&__pyx_n_s_glove_glove_cython, __pyx_k_glove_glove_cython, sizeof(__pyx_k_glove_glove_cython), 0, 0, 1, 1},
{&__pyx_kp_s_glove_glove_cython_pyx, __pyx_k_glove_glove_cython_pyx, sizeof(__pyx_k_glove_glove_cython_pyx), 0, 0, 1, 0},
{&__pyx_kp_s_got_differing_extents_in_dimensi, __pyx_k_got_differing_extents_in_dimensi, sizeof(__pyx_k_got_differing_extents_in_dimensi), 0, 0, 1, 0},
{&__pyx_n_s_gradient, __pyx_k_gradient, sizeof(__pyx_k_gradient), 0, 0, 1, 1},
{&__pyx_n_s_i, __pyx_k_i, sizeof(__pyx_k_i), 0, 0, 1, 1},
{&__pyx_n_s_id, __pyx_k_id, sizeof(__pyx_k_id), 0, 0, 1, 1},
{&__pyx_n_s_import, __pyx_k_import, sizeof(__pyx_k_import), 0, 0, 1, 1},
{&__pyx_n_s_initial_learning_rate, __pyx_k_initial_learning_rate, sizeof(__pyx_k_initial_learning_rate), 0, 0, 1, 1},
{&__pyx_n_s_itemsize, __pyx_k_itemsize, sizeof(__pyx_k_itemsize), 0, 0, 1, 1},
{&__pyx_kp_s_itemsize_0_for_cython_array, __pyx_k_itemsize_0_for_cython_array, sizeof(__pyx_k_itemsize_0_for_cython_array), 0, 0, 1, 0},
{&__pyx_n_s_j, __pyx_k_j, sizeof(__pyx_k_j), 0, 0, 1, 1},
{&__pyx_n_s_learning_rate, __pyx_k_learning_rate, sizeof(__pyx_k_learning_rate), 0, 0, 1, 1},
{&__pyx_n_s_loss, __pyx_k_loss, sizeof(__pyx_k_loss), 0, 0, 1, 1},
{&__pyx_n_s_main, __pyx_k_main, sizeof(__pyx_k_main), 0, 0, 1, 1},
{&__pyx_n_s_max_count, __pyx_k_max_count, sizeof(__pyx_k_max_count), 0, 0, 1, 1},
{&__pyx_n_s_max_loss, __pyx_k_max_loss, sizeof(__pyx_k_max_loss), 0, 0, 1, 1},
{&__pyx_n_s_memview, __pyx_k_memview, sizeof(__pyx_k_memview), 0, 0, 1, 1},
{&__pyx_n_s_mode, __pyx_k_mode, sizeof(__pyx_k_mode), 0, 0, 1, 1},
{&__pyx_n_s_name, __pyx_k_name, sizeof(__pyx_k_name), 0, 0, 1, 1},
{&__pyx_n_s_name_2, __pyx_k_name_2, sizeof(__pyx_k_name_2), 0, 0, 1, 1},
{&__pyx_n_s_ndim, __pyx_k_ndim, sizeof(__pyx_k_ndim), 0, 0, 1, 1},
{&__pyx_n_s_new, __pyx_k_new, sizeof(__pyx_k_new), 0, 0, 1, 1},
{&__pyx_n_s_no_cooccurrences, __pyx_k_no_cooccurrences, sizeof(__pyx_k_no_cooccurrences), 0, 0, 1, 1},
{&__pyx_kp_s_no_default___reduce___due_to_non, __pyx_k_no_default___reduce___due_to_non, sizeof(__pyx_k_no_default___reduce___due_to_non), 0, 0, 1, 0},
{&__pyx_n_s_no_threads, __pyx_k_no_threads, sizeof(__pyx_k_no_threads), 0, 0, 1, 1},
{&__pyx_n_s_np, __pyx_k_np, sizeof(__pyx_k_np), 0, 0, 1, 1},
{&__pyx_n_s_numpy, __pyx_k_numpy, sizeof(__pyx_k_numpy), 0, 0, 1, 1},
{&__pyx_n_s_obj, __pyx_k_obj, sizeof(__pyx_k_obj), 0, 0, 1, 1},
{&__pyx_n_s_pack, __pyx_k_pack, sizeof(__pyx_k_pack), 0, 0, 1, 1},
{&__pyx_n_s_pickle, __pyx_k_pickle, sizeof(__pyx_k_pickle), 0, 0, 1, 1},
{&__pyx_n_s_prediction, __pyx_k_prediction, sizeof(__pyx_k_prediction), 0, 0, 1, 1},
{&__pyx_n_s_pyx_PickleError, __pyx_k_pyx_PickleError, sizeof(__pyx_k_pyx_PickleError), 0, 0, 1, 1},
{&__pyx_n_s_pyx_checksum, __pyx_k_pyx_checksum, sizeof(__pyx_k_pyx_checksum), 0, 0, 1, 1},
{&__pyx_n_s_pyx_getbuffer, __pyx_k_pyx_getbuffer, sizeof(__pyx_k_pyx_getbuffer), 0, 0, 1, 1},
{&__pyx_n_s_pyx_result, __pyx_k_pyx_result, sizeof(__pyx_k_pyx_result), 0, 0, 1, 1},
{&__pyx_n_s_pyx_state, __pyx_k_pyx_state, sizeof(__pyx_k_pyx_state), 0, 0, 1, 1},
{&__pyx_n_s_pyx_type, __pyx_k_pyx_type, sizeof(__pyx_k_pyx_type), 0, 0, 1, 1},
{&__pyx_n_s_pyx_unpickle_Enum, __pyx_k_pyx_unpickle_Enum, sizeof(__pyx_k_pyx_unpickle_Enum), 0, 0, 1, 1},
{&__pyx_n_s_pyx_vtable, __pyx_k_pyx_vtable, sizeof(__pyx_k_pyx_vtable), 0, 0, 1, 1},
{&__pyx_n_s_range, __pyx_k_range, sizeof(__pyx_k_range), 0, 0, 1, 1},
{&__pyx_n_s_reduce, __pyx_k_reduce, sizeof(__pyx_k_reduce), 0, 0, 1, 1},
{&__pyx_n_s_reduce_cython, __pyx_k_reduce_cython, sizeof(__pyx_k_reduce_cython), 0, 0, 1, 1},
{&__pyx_n_s_reduce_ex, __pyx_k_reduce_ex, sizeof(__pyx_k_reduce_ex), 0, 0, 1, 1},
{&__pyx_n_s_row, __pyx_k_row, sizeof(__pyx_k_row), 0, 0, 1, 1},
{&__pyx_n_s_scipy_sparse, __pyx_k_scipy_sparse, sizeof(__pyx_k_scipy_sparse), 0, 0, 1, 1},
{&__pyx_n_s_setstate, __pyx_k_setstate, sizeof(__pyx_k_setstate), 0, 0, 1, 1},
{&__pyx_n_s_setstate_cython, __pyx_k_setstate_cython, sizeof(__pyx_k_setstate_cython), 0, 0, 1, 1},
{&__pyx_n_s_shape, __pyx_k_shape, sizeof(__pyx_k_shape), 0, 0, 1, 1},
{&__pyx_n_s_shuffle_index, __pyx_k_shuffle_index, sizeof(__pyx_k_shuffle_index), 0, 0, 1, 1},
{&__pyx_n_s_shuffle_indices, __pyx_k_shuffle_indices, sizeof(__pyx_k_shuffle_indices), 0, 0, 1, 1},
{&__pyx_n_s_size, __pyx_k_size, sizeof(__pyx_k_size), 0, 0, 1, 1},
{&__pyx_n_s_sp, __pyx_k_sp, sizeof(__pyx_k_sp), 0, 0, 1, 1},
{&__pyx_n_s_start, __pyx_k_start, sizeof(__pyx_k_start), 0, 0, 1, 1},
{&__pyx_n_s_step, __pyx_k_step, sizeof(__pyx_k_step), 0, 0, 1, 1},
{&__pyx_n_s_stop, __pyx_k_stop, sizeof(__pyx_k_stop), 0, 0, 1, 1},
{&__pyx_kp_s_strided_and_direct, __pyx_k_strided_and_direct, sizeof(__pyx_k_strided_and_direct), 0, 0, 1, 0},
{&__pyx_kp_s_strided_and_direct_or_indirect, __pyx_k_strided_and_direct_or_indirect, sizeof(__pyx_k_strided_and_direct_or_indirect), 0, 0, 1, 0},
{&__pyx_kp_s_strided_and_indirect, __pyx_k_strided_and_indirect, sizeof(__pyx_k_strided_and_indirect), 0, 0, 1, 0},
{&__pyx_kp_s_stringsource, __pyx_k_stringsource, sizeof(__pyx_k_stringsource), 0, 0, 1, 0},
{&__pyx_n_s_struct, __pyx_k_struct, sizeof(__pyx_k_struct), 0, 0, 1, 1},
{&__pyx_n_s_test, __pyx_k_test, sizeof(__pyx_k_test), 0, 0, 1, 1},
{&__pyx_kp_s_unable_to_allocate_array_data, __pyx_k_unable_to_allocate_array_data, sizeof(__pyx_k_unable_to_allocate_array_data), 0, 0, 1, 0},
{&__pyx_kp_s_unable_to_allocate_shape_and_str, __pyx_k_unable_to_allocate_shape_and_str, sizeof(__pyx_k_unable_to_allocate_shape_and_str), 0, 0, 1, 0},
{&__pyx_n_s_unpack, __pyx_k_unpack, sizeof(__pyx_k_unpack), 0, 0, 1, 1},
{&__pyx_n_s_update, __pyx_k_update, sizeof(__pyx_k_update), 0, 0, 1, 1},
{&__pyx_n_s_word_a, __pyx_k_word_a, sizeof(__pyx_k_word_a), 0, 0, 1, 1},
{&__pyx_n_s_word_b, __pyx_k_word_b, sizeof(__pyx_k_word_b), 0, 0, 1, 1},
{&__pyx_n_s_wordbias, __pyx_k_wordbias, sizeof(__pyx_k_wordbias), 0, 0, 1, 1},
{&__pyx_n_s_wordbias_sum_gradients, __pyx_k_wordbias_sum_gradients, sizeof(__pyx_k_wordbias_sum_gradients), 0, 0, 1, 1},
{&__pyx_n_s_wordvec, __pyx_k_wordvec, sizeof(__pyx_k_wordvec), 0, 0, 1, 1},
{&__pyx_n_s_wordvec_sum_gradients, __pyx_k_wordvec_sum_gradients, sizeof(__pyx_k_wordvec_sum_gradients), 0, 0, 1, 1},
{0, 0, 0, 0, 0, 0, 0}
};
static CYTHON_SMALL_CODE int __Pyx_InitCachedBuiltins(void) {
__pyx_builtin_range = __Pyx_GetBuiltinName(__pyx_n_s_range); if (!__pyx_builtin_range) __PYX_ERR(0, 70, __pyx_L1_error)
__pyx_builtin_ValueError = __Pyx_GetBuiltinName(__pyx_n_s_ValueError); if (!__pyx_builtin_ValueError) __PYX_ERR(1, 133, __pyx_L1_error)
__pyx_builtin_MemoryError = __Pyx_GetBuiltinName(__pyx_n_s_MemoryError); if (!__pyx_builtin_MemoryError) __PYX_ERR(1, 148, __pyx_L1_error)
__pyx_builtin_enumerate = __Pyx_GetBuiltinName(__pyx_n_s_enumerate); if (!__pyx_builtin_enumerate) __PYX_ERR(1, 151, __pyx_L1_error)
__pyx_builtin_TypeError = __Pyx_GetBuiltinName(__pyx_n_s_TypeError); if (!__pyx_builtin_TypeError) __PYX_ERR(1, 2, __pyx_L1_error)
__pyx_builtin_Ellipsis = __Pyx_GetBuiltinName(__pyx_n_s_Ellipsis); if (!__pyx_builtin_Ellipsis) __PYX_ERR(1, 400, __pyx_L1_error)
__pyx_builtin_id = __Pyx_GetBuiltinName(__pyx_n_s_id); if (!__pyx_builtin_id) __PYX_ERR(1, 609, __pyx_L1_error)
__pyx_builtin_IndexError = __Pyx_GetBuiltinName(__pyx_n_s_IndexError); if (!__pyx_builtin_IndexError) __PYX_ERR(1, 828, __pyx_L1_error)
return 0;
__pyx_L1_error:;
return -1;
}
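/* Editorial annotation: __Pyx_InitCachedBuiltins above resolves builtins
 * (range, ValueError, TypeError, ...) once so later code can raise and
 * iterate without repeated name lookups; the (file, line) pairs passed to
 * __PYX_ERR refer back to the glove_cython.pyx and View.MemoryView
 * sources quoted in the comments. __Pyx_InitCachedConstants, next, builds
 * mostly one-element argument tuples for `raise` statements, pre-packed
 * so each raise is a single call, plus the name tuples and code objects
 * for fit_vectors and __pyx_unpickle_Enum. */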
static CYTHON_SMALL_CODE int __Pyx_InitCachedConstants(void) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__Pyx_InitCachedConstants", 0);
/* "View.MemoryView":133
*
* if not self.ndim:
* raise ValueError("Empty shape tuple for cython.array") # <<<<<<<<<<<<<<
*
* if itemsize <= 0:
*/
__pyx_tuple_ = PyTuple_Pack(1, __pyx_kp_s_Empty_shape_tuple_for_cython_arr); if (unlikely(!__pyx_tuple_)) __PYX_ERR(1, 133, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple_);
__Pyx_GIVEREF(__pyx_tuple_);
/* "View.MemoryView":136
*
* if itemsize <= 0:
* raise ValueError("itemsize <= 0 for cython.array") # <<<<<<<<<<<<<<
*
* if not isinstance(format, bytes):
*/
__pyx_tuple__2 = PyTuple_Pack(1, __pyx_kp_s_itemsize_0_for_cython_array); if (unlikely(!__pyx_tuple__2)) __PYX_ERR(1, 136, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__2);
__Pyx_GIVEREF(__pyx_tuple__2);
/* "View.MemoryView":148
*
* if not self._shape:
* raise MemoryError("unable to allocate shape and strides.") # <<<<<<<<<<<<<<
*
*
*/
__pyx_tuple__3 = PyTuple_Pack(1, __pyx_kp_s_unable_to_allocate_shape_and_str); if (unlikely(!__pyx_tuple__3)) __PYX_ERR(1, 148, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__3);
__Pyx_GIVEREF(__pyx_tuple__3);
/* "View.MemoryView":176
* self.data = <char *>malloc(self.len)
* if not self.data:
* raise MemoryError("unable to allocate array data.") # <<<<<<<<<<<<<<
*
* if self.dtype_is_object:
*/
__pyx_tuple__4 = PyTuple_Pack(1, __pyx_kp_s_unable_to_allocate_array_data); if (unlikely(!__pyx_tuple__4)) __PYX_ERR(1, 176, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__4);
__Pyx_GIVEREF(__pyx_tuple__4);
/* "View.MemoryView":192
* bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
* if not (flags & bufmode):
* raise ValueError("Can only create a buffer that is contiguous in memory.") # <<<<<<<<<<<<<<
* info.buf = self.data
* info.len = self.len
*/
__pyx_tuple__5 = PyTuple_Pack(1, __pyx_kp_s_Can_only_create_a_buffer_that_is); if (unlikely(!__pyx_tuple__5)) __PYX_ERR(1, 192, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__5);
__Pyx_GIVEREF(__pyx_tuple__5);
/* "(tree fragment)":2
* def __reduce_cython__(self):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<<
* def __setstate_cython__(self, __pyx_state):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
*/
__pyx_tuple__6 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__6)) __PYX_ERR(1, 2, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__6);
__Pyx_GIVEREF(__pyx_tuple__6);
/* "(tree fragment)":4
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<<
*/
__pyx_tuple__7 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__7)) __PYX_ERR(1, 4, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__7);
__Pyx_GIVEREF(__pyx_tuple__7);
/* "View.MemoryView":414
* def __setitem__(memoryview self, object index, object value):
* if self.view.readonly:
* raise TypeError("Cannot assign to read-only memoryview") # <<<<<<<<<<<<<<
*
* have_slices, index = _unellipsify(index, self.view.ndim)
*/
__pyx_tuple__8 = PyTuple_Pack(1, __pyx_kp_s_Cannot_assign_to_read_only_memor); if (unlikely(!__pyx_tuple__8)) __PYX_ERR(1, 414, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__8);
__Pyx_GIVEREF(__pyx_tuple__8);
/* "View.MemoryView":491
* result = struct.unpack(self.view.format, bytesitem)
* except struct.error:
* raise ValueError("Unable to convert item to object") # <<<<<<<<<<<<<<
* else:
* if len(self.view.format) == 1:
*/
__pyx_tuple__9 = PyTuple_Pack(1, __pyx_kp_s_Unable_to_convert_item_to_object); if (unlikely(!__pyx_tuple__9)) __PYX_ERR(1, 491, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__9);
__Pyx_GIVEREF(__pyx_tuple__9);
/* "View.MemoryView":516
* def __getbuffer__(self, Py_buffer *info, int flags):
* if flags & PyBUF_WRITABLE and self.view.readonly:
* raise ValueError("Cannot create writable memory view from read-only memoryview") # <<<<<<<<<<<<<<
*
* if flags & PyBUF_ND:
*/
__pyx_tuple__10 = PyTuple_Pack(1, __pyx_kp_s_Cannot_create_writable_memory_vi); if (unlikely(!__pyx_tuple__10)) __PYX_ERR(1, 516, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__10);
__Pyx_GIVEREF(__pyx_tuple__10);
/* "View.MemoryView":566
* if self.view.strides == NULL:
*
* raise ValueError("Buffer view does not expose strides") # <<<<<<<<<<<<<<
*
* return tuple([stride for stride in self.view.strides[:self.view.ndim]])
*/
__pyx_tuple__11 = PyTuple_Pack(1, __pyx_kp_s_Buffer_view_does_not_expose_stri); if (unlikely(!__pyx_tuple__11)) __PYX_ERR(1, 566, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__11);
__Pyx_GIVEREF(__pyx_tuple__11);
/* "View.MemoryView":573
* def suboffsets(self):
* if self.view.suboffsets == NULL:
* return (-1,) * self.view.ndim # <<<<<<<<<<<<<<
*
* return tuple([suboffset for suboffset in self.view.suboffsets[:self.view.ndim]])
*/
__pyx_tuple__12 = PyTuple_New(1); if (unlikely(!__pyx_tuple__12)) __PYX_ERR(1, 573, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__12);
__Pyx_INCREF(__pyx_int_neg_1);
__Pyx_GIVEREF(__pyx_int_neg_1);
PyTuple_SET_ITEM(__pyx_tuple__12, 0, __pyx_int_neg_1);
__Pyx_GIVEREF(__pyx_tuple__12);
/* "(tree fragment)":2
* def __reduce_cython__(self):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<<
* def __setstate_cython__(self, __pyx_state):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
*/
__pyx_tuple__13 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__13)) __PYX_ERR(1, 2, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__13);
__Pyx_GIVEREF(__pyx_tuple__13);
/* "(tree fragment)":4
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<<
*/
__pyx_tuple__14 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__14)) __PYX_ERR(1, 4, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__14);
__Pyx_GIVEREF(__pyx_tuple__14);
/* "View.MemoryView":678
* if item is Ellipsis:
* if not seen_ellipsis:
* result.extend([slice(None)] * (ndim - len(tup) + 1)) # <<<<<<<<<<<<<<
* seen_ellipsis = True
* else:
*/
__pyx_slice__15 = PySlice_New(Py_None, Py_None, Py_None); if (unlikely(!__pyx_slice__15)) __PYX_ERR(1, 678, __pyx_L1_error)
__Pyx_GOTREF(__pyx_slice__15);
__Pyx_GIVEREF(__pyx_slice__15);
/* "View.MemoryView":699
* for suboffset in suboffsets[:ndim]:
* if suboffset >= 0:
* raise ValueError("Indirect dimensions not supported") # <<<<<<<<<<<<<<
*
*
*/
__pyx_tuple__16 = PyTuple_Pack(1, __pyx_kp_s_Indirect_dimensions_not_supporte); if (unlikely(!__pyx_tuple__16)) __PYX_ERR(1, 699, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__16);
__Pyx_GIVEREF(__pyx_tuple__16);
/* "(tree fragment)":2
* def __reduce_cython__(self):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<<
* def __setstate_cython__(self, __pyx_state):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
*/
__pyx_tuple__17 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__17)) __PYX_ERR(1, 2, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__17);
__Pyx_GIVEREF(__pyx_tuple__17);
/* "(tree fragment)":4
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<<
*/
__pyx_tuple__18 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__18)) __PYX_ERR(1, 4, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__18);
__Pyx_GIVEREF(__pyx_tuple__18);
/* "glove/glove_cython.pyx":20
*
*
* def fit_vectors(double[:, ::1] wordvec, # <<<<<<<<<<<<<<
* double[:, ::1] wordvec_sum_gradients,
* double[::1] wordbias,
*/
__pyx_tuple__20 = PyTuple_Pack(26, __pyx_n_s_wordvec, __pyx_n_s_wordvec_sum_gradients, __pyx_n_s_wordbias, __pyx_n_s_wordbias_sum_gradients, __pyx_n_s_row, __pyx_n_s_col, __pyx_n_s_counts, __pyx_n_s_shuffle_indices, __pyx_n_s_initial_learning_rate, __pyx_n_s_max_count, __pyx_n_s_alpha, __pyx_n_s_max_loss, __pyx_n_s_no_threads, __pyx_n_s_dim, __pyx_n_s_no_cooccurrences, __pyx_n_s_word_a, __pyx_n_s_word_b, __pyx_n_s_count, __pyx_n_s_learning_rate, __pyx_n_s_gradient, __pyx_n_s_prediction, __pyx_n_s_entry_weight, __pyx_n_s_loss, __pyx_n_s_i, __pyx_n_s_j, __pyx_n_s_shuffle_index); if (unlikely(!__pyx_tuple__20)) __PYX_ERR(0, 20, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__20);
__Pyx_GIVEREF(__pyx_tuple__20);
__pyx_codeobj__21 = (PyObject*)__Pyx_PyCode_New(13, 0, 26, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__20, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_glove_glove_cython_pyx, __pyx_n_s_fit_vectors, 20, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__21)) __PYX_ERR(0, 20, __pyx_L1_error)
/* "View.MemoryView":286
* return self.name
*
* cdef generic = Enum("<strided and direct or indirect>") # <<<<<<<<<<<<<<
* cdef strided = Enum("<strided and direct>") # default
* cdef indirect = Enum("<strided and indirect>")
*/
__pyx_tuple__22 = PyTuple_Pack(1, __pyx_kp_s_strided_and_direct_or_indirect); if (unlikely(!__pyx_tuple__22)) __PYX_ERR(1, 286, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__22);
__Pyx_GIVEREF(__pyx_tuple__22);
/* "View.MemoryView":287
*
* cdef generic = Enum("<strided and direct or indirect>")
* cdef strided = Enum("<strided and direct>") # default # <<<<<<<<<<<<<<
* cdef indirect = Enum("<strided and indirect>")
*
*/
__pyx_tuple__23 = PyTuple_Pack(1, __pyx_kp_s_strided_and_direct); if (unlikely(!__pyx_tuple__23)) __PYX_ERR(1, 287, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__23);
__Pyx_GIVEREF(__pyx_tuple__23);
/* "View.MemoryView":288
* cdef generic = Enum("<strided and direct or indirect>")
* cdef strided = Enum("<strided and direct>") # default
* cdef indirect = Enum("<strided and indirect>") # <<<<<<<<<<<<<<
*
*
*/
__pyx_tuple__24 = PyTuple_Pack(1, __pyx_kp_s_strided_and_indirect); if (unlikely(!__pyx_tuple__24)) __PYX_ERR(1, 288, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__24);
__Pyx_GIVEREF(__pyx_tuple__24);
/* "View.MemoryView":291
*
*
* cdef contiguous = Enum("<contiguous and direct>") # <<<<<<<<<<<<<<
* cdef indirect_contiguous = Enum("<contiguous and indirect>")
*
*/
__pyx_tuple__25 = PyTuple_Pack(1, __pyx_kp_s_contiguous_and_direct); if (unlikely(!__pyx_tuple__25)) __PYX_ERR(1, 291, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__25);
__Pyx_GIVEREF(__pyx_tuple__25);
/* "View.MemoryView":292
*
* cdef contiguous = Enum("<contiguous and direct>")
* cdef indirect_contiguous = Enum("<contiguous and indirect>") # <<<<<<<<<<<<<<
*
*
*/
__pyx_tuple__26 = PyTuple_Pack(1, __pyx_kp_s_contiguous_and_indirect); if (unlikely(!__pyx_tuple__26)) __PYX_ERR(1, 292, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__26);
__Pyx_GIVEREF(__pyx_tuple__26);
/* "(tree fragment)":1
* def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<<
* cdef object __pyx_PickleError
* cdef object __pyx_result
*/
__pyx_tuple__27 = PyTuple_Pack(5, __pyx_n_s_pyx_type, __pyx_n_s_pyx_checksum, __pyx_n_s_pyx_state, __pyx_n_s_pyx_PickleError, __pyx_n_s_pyx_result); if (unlikely(!__pyx_tuple__27)) __PYX_ERR(1, 1, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__27);
__Pyx_GIVEREF(__pyx_tuple__27);
__pyx_codeobj__28 = (PyObject*)__Pyx_PyCode_New(3, 0, 5, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__27, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_stringsource, __pyx_n_s_pyx_unpickle_Enum, 1, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__28)) __PYX_ERR(1, 1, __pyx_L1_error)
__Pyx_RefNannyFinishContext();
return 0;
__pyx_L1_error:;
__Pyx_RefNannyFinishContext();
return -1;
}
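/* Editorial annotation: __Pyx_InitGlobals, below, initialises threading
 * (PyEval_InitThreads under WITH_THREAD), fills the string table, and
 * caches a few int constants. Of these, __pyx_int_184977713 is exactly
 * 0xB068931, the pickle checksum that __pyx_unpickle_Enum compares
 * against in the tree-fragment code earlier in the file. */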
static CYTHON_SMALL_CODE int __Pyx_InitGlobals(void) {
/* InitThreads.init */
#ifdef WITH_THREAD
PyEval_InitThreads();
#endif
if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 1, __pyx_L1_error)
if (__Pyx_InitStrings(__pyx_string_tab) < 0) __PYX_ERR(0, 1, __pyx_L1_error);
__pyx_int_0 = PyInt_FromLong(0); if (unlikely(!__pyx_int_0)) __PYX_ERR(0, 1, __pyx_L1_error)
__pyx_int_1 = PyInt_FromLong(1); if (unlikely(!__pyx_int_1)) __PYX_ERR(0, 1, __pyx_L1_error)
__pyx_int_184977713 = PyInt_FromLong(184977713L); if (unlikely(!__pyx_int_184977713)) __PYX_ERR(0, 1, __pyx_L1_error)
__pyx_int_neg_1 = PyInt_FromLong(-1); if (unlikely(!__pyx_int_neg_1)) __PYX_ERR(0, 1, __pyx_L1_error)
return 0;
__pyx_L1_error:;
return -1;
}
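/* Editorial annotation: module initialisation is split into the small
 * __Pyx_modinit_* helpers declared below. For this module only
 * global_init (the five layout sentinels generic/strided/indirect/
 * contiguous/indirect_contiguous start out as Py_None) and type_init
 * (PyType_Ready plus vtable setup for the four helper types) do real
 * work; the variable/function export and import hooks are empty. */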
static CYTHON_SMALL_CODE int __Pyx_modinit_global_init_code(void); /*proto*/
static CYTHON_SMALL_CODE int __Pyx_modinit_variable_export_code(void); /*proto*/
static CYTHON_SMALL_CODE int __Pyx_modinit_function_export_code(void); /*proto*/
static CYTHON_SMALL_CODE int __Pyx_modinit_type_init_code(void); /*proto*/
static CYTHON_SMALL_CODE int __Pyx_modinit_type_import_code(void); /*proto*/
static CYTHON_SMALL_CODE int __Pyx_modinit_variable_import_code(void); /*proto*/
static CYTHON_SMALL_CODE int __Pyx_modinit_function_import_code(void); /*proto*/
static int __Pyx_modinit_global_init_code(void) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__Pyx_modinit_global_init_code", 0);
/*--- Global init code ---*/
generic = Py_None; Py_INCREF(Py_None);
strided = Py_None; Py_INCREF(Py_None);
indirect = Py_None; Py_INCREF(Py_None);
contiguous = Py_None; Py_INCREF(Py_None);
indirect_contiguous = Py_None; Py_INCREF(Py_None);
__Pyx_RefNannyFinishContext();
return 0;
}
static int __Pyx_modinit_variable_export_code(void) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__Pyx_modinit_variable_export_code", 0);
/*--- Variable export code ---*/
__Pyx_RefNannyFinishContext();
return 0;
}
static int __Pyx_modinit_function_export_code(void) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__Pyx_modinit_function_export_code", 0);
/*--- Function export code ---*/
__Pyx_RefNannyFinishContext();
return 0;
}
static int __Pyx_modinit_type_init_code(void) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__Pyx_modinit_type_init_code", 0);
/*--- Type init code ---*/
__pyx_vtabptr_array = &__pyx_vtable_array;
__pyx_vtable_array.get_memview = (PyObject *(*)(struct __pyx_array_obj *))__pyx_array_get_memview;
if (PyType_Ready(&__pyx_type___pyx_array) < 0) __PYX_ERR(1, 105, __pyx_L1_error)
__pyx_type___pyx_array.tp_print = 0;
if (__Pyx_SetVtable(__pyx_type___pyx_array.tp_dict, __pyx_vtabptr_array) < 0) __PYX_ERR(1, 105, __pyx_L1_error)
if (__Pyx_setup_reduce((PyObject*)&__pyx_type___pyx_array) < 0) __PYX_ERR(1, 105, __pyx_L1_error)
__pyx_array_type = &__pyx_type___pyx_array;
if (PyType_Ready(&__pyx_type___pyx_MemviewEnum) < 0) __PYX_ERR(1, 279, __pyx_L1_error)
__pyx_type___pyx_MemviewEnum.tp_print = 0;
if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_type___pyx_MemviewEnum.tp_dictoffset && __pyx_type___pyx_MemviewEnum.tp_getattro == PyObject_GenericGetAttr)) {
__pyx_type___pyx_MemviewEnum.tp_getattro = __Pyx_PyObject_GenericGetAttr;
}
if (__Pyx_setup_reduce((PyObject*)&__pyx_type___pyx_MemviewEnum) < 0) __PYX_ERR(1, 279, __pyx_L1_error)
__pyx_MemviewEnum_type = &__pyx_type___pyx_MemviewEnum;
__pyx_vtabptr_memoryview = &__pyx_vtable_memoryview;
__pyx_vtable_memoryview.get_item_pointer = (char *(*)(struct __pyx_memoryview_obj *, PyObject *))__pyx_memoryview_get_item_pointer;
__pyx_vtable_memoryview.is_slice = (PyObject *(*)(struct __pyx_memoryview_obj *, PyObject *))__pyx_memoryview_is_slice;
__pyx_vtable_memoryview.setitem_slice_assignment = (PyObject *(*)(struct __pyx_memoryview_obj *, PyObject *, PyObject *))__pyx_memoryview_setitem_slice_assignment;
__pyx_vtable_memoryview.setitem_slice_assign_scalar = (PyObject *(*)(struct __pyx_memoryview_obj *, struct __pyx_memoryview_obj *, PyObject *))__pyx_memoryview_setitem_slice_assign_scalar;
__pyx_vtable_memoryview.setitem_indexed = (PyObject *(*)(struct __pyx_memoryview_obj *, PyObject *, PyObject *))__pyx_memoryview_setitem_indexed;
__pyx_vtable_memoryview.convert_item_to_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *))__pyx_memoryview_convert_item_to_object;
__pyx_vtable_memoryview.assign_item_from_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *, PyObject *))__pyx_memoryview_assign_item_from_object;
if (PyType_Ready(&__pyx_type___pyx_memoryview) < 0) __PYX_ERR(1, 330, __pyx_L1_error)
__pyx_type___pyx_memoryview.tp_print = 0;
if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_type___pyx_memoryview.tp_dictoffset && __pyx_type___pyx_memoryview.tp_getattro == PyObject_GenericGetAttr)) {
__pyx_type___pyx_memoryview.tp_getattro = __Pyx_PyObject_GenericGetAttr;
}
if (__Pyx_SetVtable(__pyx_type___pyx_memoryview.tp_dict, __pyx_vtabptr_memoryview) < 0) __PYX_ERR(1, 330, __pyx_L1_error)
if (__Pyx_setup_reduce((PyObject*)&__pyx_type___pyx_memoryview) < 0) __PYX_ERR(1, 330, __pyx_L1_error)
__pyx_memoryview_type = &__pyx_type___pyx_memoryview;
__pyx_vtabptr__memoryviewslice = &__pyx_vtable__memoryviewslice;
__pyx_vtable__memoryviewslice.__pyx_base = *__pyx_vtabptr_memoryview;
__pyx_vtable__memoryviewslice.__pyx_base.convert_item_to_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *))__pyx_memoryviewslice_convert_item_to_object;
__pyx_vtable__memoryviewslice.__pyx_base.assign_item_from_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *, PyObject *))__pyx_memoryviewslice_assign_item_from_object;
__pyx_type___pyx_memoryviewslice.tp_base = __pyx_memoryview_type;
if (PyType_Ready(&__pyx_type___pyx_memoryviewslice) < 0) __PYX_ERR(1, 961, __pyx_L1_error)
__pyx_type___pyx_memoryviewslice.tp_print = 0;
if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_type___pyx_memoryviewslice.tp_dictoffset && __pyx_type___pyx_memoryviewslice.tp_getattro == PyObject_GenericGetAttr)) {
__pyx_type___pyx_memoryviewslice.tp_getattro = __Pyx_PyObject_GenericGetAttr;
}
if (__Pyx_SetVtable(__pyx_type___pyx_memoryviewslice.tp_dict, __pyx_vtabptr__memoryviewslice) < 0) __PYX_ERR(1, 961, __pyx_L1_error)
if (__Pyx_setup_reduce((PyObject*)&__pyx_type___pyx_memoryviewslice) < 0) __PYX_ERR(1, 961, __pyx_L1_error)
__pyx_memoryviewslice_type = &__pyx_type___pyx_memoryviewslice;
__Pyx_RefNannyFinishContext();
return 0;
__pyx_L1_error:;
__Pyx_RefNannyFinishContext();
return -1;
}
static int __Pyx_modinit_type_import_code(void) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__Pyx_modinit_type_import_code", 0);
/*--- Type import code ---*/
__Pyx_RefNannyFinishContext();
return 0;
}
static int __Pyx_modinit_variable_import_code(void) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__Pyx_modinit_variable_import_code", 0);
/*--- Variable import code ---*/
__Pyx_RefNannyFinishContext();
return 0;
}
static int __Pyx_modinit_function_import_code(void) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__Pyx_modinit_function_import_code", 0);
/*--- Function import code ---*/
__Pyx_RefNannyFinishContext();
return 0;
}
#if PY_MAJOR_VERSION < 3
#ifdef CYTHON_NO_PYINIT_EXPORT
#define __Pyx_PyMODINIT_FUNC void
#else
#define __Pyx_PyMODINIT_FUNC PyMODINIT_FUNC
#endif
#else
#ifdef CYTHON_NO_PYINIT_EXPORT
#define __Pyx_PyMODINIT_FUNC PyObject *
#else
#define __Pyx_PyMODINIT_FUNC PyMODINIT_FUNC
#endif
#endif
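/* Editorial annotation: __Pyx_PyMODINIT_FUNC papers over the Py2/Py3
 * difference in module-init entry points: initglove_cython returning
 * void on Python 2 versus PyInit_glove_cython returning PyObject* on
 * Python 3. Under multi-phase init the Py3 entry point merely returns
 * the PyModuleDef; the real import-time work happens in
 * __pyx_pymod_exec_glove_cython. */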
#if PY_MAJOR_VERSION < 3
__Pyx_PyMODINIT_FUNC initglove_cython(void) CYTHON_SMALL_CODE; /*proto*/
__Pyx_PyMODINIT_FUNC initglove_cython(void)
#else
__Pyx_PyMODINIT_FUNC PyInit_glove_cython(void) CYTHON_SMALL_CODE; /*proto*/
__Pyx_PyMODINIT_FUNC PyInit_glove_cython(void)
#if CYTHON_PEP489_MULTI_PHASE_INIT
{
return PyModuleDef_Init(&__pyx_moduledef);
}
static CYTHON_SMALL_CODE int __Pyx_check_single_interpreter(void) {
#if PY_VERSION_HEX >= 0x030700A1
static PY_INT64_T main_interpreter_id = -1;
PY_INT64_T current_id = PyInterpreterState_GetID(PyThreadState_Get()->interp);
if (main_interpreter_id == -1) {
main_interpreter_id = current_id;
return (unlikely(current_id == -1)) ? -1 : 0;
} else if (unlikely(main_interpreter_id != current_id))
#else
static PyInterpreterState *main_interpreter = NULL;
PyInterpreterState *current_interpreter = PyThreadState_Get()->interp;
if (!main_interpreter) {
main_interpreter = current_interpreter;
} else if (unlikely(main_interpreter != current_interpreter))
#endif
{
PyErr_SetString(
PyExc_ImportError,
"Interpreter change detected - this module can only be loaded into one interpreter per process.");
return -1;
}
return 0;
}
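/* Copies a single attribute of the import spec (loader, origin, parent,
   submodule_search_locations) into the module dict under the matching
   dunder name.  A missing attribute is silently ignored; a None value is
   skipped unless allow_none is set. */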
static CYTHON_SMALL_CODE int __Pyx_copy_spec_to_module(PyObject *spec, PyObject *moddict, const char* from_name, const char* to_name, int allow_none) {
PyObject *value = PyObject_GetAttrString(spec, from_name);
int result = 0;
if (likely(value)) {
if (allow_none || value != Py_None) {
result = PyDict_SetItemString(moddict, to_name, value);
}
Py_DECREF(value);
} else if (PyErr_ExceptionMatches(PyExc_AttributeError)) {
PyErr_Clear();
} else {
result = -1;
}
return result;
}
static CYTHON_SMALL_CODE PyObject* __pyx_pymod_create(PyObject *spec, CYTHON_UNUSED PyModuleDef *def) {
PyObject *module = NULL, *moddict, *modname;
if (__Pyx_check_single_interpreter())
return NULL;
if (__pyx_m)
return __Pyx_NewRef(__pyx_m);
modname = PyObject_GetAttrString(spec, "name");
if (unlikely(!modname)) goto bad;
module = PyModule_NewObject(modname);
Py_DECREF(modname);
if (unlikely(!module)) goto bad;
moddict = PyModule_GetDict(module);
if (unlikely(!moddict)) goto bad;
if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "loader", "__loader__", 1) < 0)) goto bad;
if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "origin", "__file__", 1) < 0)) goto bad;
if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "parent", "__package__", 1) < 0)) goto bad;
if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "submodule_search_locations", "__path__", 0) < 0)) goto bad;
return module;
bad:
Py_XDECREF(module);
return NULL;
}
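/* Exec phase of multi-phase init: runs the actual module body (imports,
   cached constants, function objects, memoryview support) against the
   module object created above.  Single-phase builds reach the same body
   directly from the init entry point. */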
static CYTHON_SMALL_CODE int __pyx_pymod_exec_glove_cython(PyObject *__pyx_pyinit_module)
#endif
#endif
{
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
static PyThread_type_lock __pyx_t_3[8];
__Pyx_RefNannyDeclarations
#if CYTHON_PEP489_MULTI_PHASE_INIT
if (__pyx_m) {
if (__pyx_m == __pyx_pyinit_module) return 0;
PyErr_SetString(PyExc_RuntimeError, "Module 'glove_cython' has already been imported. Re-initialisation is not supported.");
return -1;
}
#elif PY_MAJOR_VERSION >= 3
if (__pyx_m) return __Pyx_NewRef(__pyx_m);
#endif
#if CYTHON_REFNANNY
__Pyx_RefNanny = __Pyx_RefNannyImportAPI("refnanny");
if (!__Pyx_RefNanny) {
PyErr_Clear();
__Pyx_RefNanny = __Pyx_RefNannyImportAPI("Cython.Runtime.refnanny");
if (!__Pyx_RefNanny)
Py_FatalError("failed to import 'refnanny' module");
}
#endif
__Pyx_RefNannySetupContext("__Pyx_PyMODINIT_FUNC PyInit_glove_cython(void)", 0);
if (__Pyx_check_binary_version() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#ifdef __Pxy_PyFrame_Initialize_Offsets
__Pxy_PyFrame_Initialize_Offsets();
#endif
__pyx_empty_tuple = PyTuple_New(0); if (unlikely(!__pyx_empty_tuple)) __PYX_ERR(0, 1, __pyx_L1_error)
__pyx_empty_bytes = PyBytes_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_bytes)) __PYX_ERR(0, 1, __pyx_L1_error)
__pyx_empty_unicode = PyUnicode_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_unicode)) __PYX_ERR(0, 1, __pyx_L1_error)
#ifdef __Pyx_CyFunction_USED
if (__pyx_CyFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
#ifdef __Pyx_FusedFunction_USED
if (__pyx_FusedFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
#ifdef __Pyx_Coroutine_USED
if (__pyx_Coroutine_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
#ifdef __Pyx_Generator_USED
if (__pyx_Generator_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
#ifdef __Pyx_AsyncGen_USED
if (__pyx_AsyncGen_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
#ifdef __Pyx_StopAsyncIteration_USED
if (__pyx_StopAsyncIteration_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
/*--- Library function declarations ---*/
/*--- Threads initialization code ---*/
#if defined(__PYX_FORCE_INIT_THREADS) && __PYX_FORCE_INIT_THREADS
#ifdef WITH_THREAD /* Python build with threading support? */
PyEval_InitThreads();
#endif
#endif
/*--- Module creation code ---*/
#if CYTHON_PEP489_MULTI_PHASE_INIT
__pyx_m = __pyx_pyinit_module;
Py_INCREF(__pyx_m);
#else
#if PY_MAJOR_VERSION < 3
__pyx_m = Py_InitModule4("glove_cython", __pyx_methods, 0, 0, PYTHON_API_VERSION); Py_XINCREF(__pyx_m);
#else
__pyx_m = PyModule_Create(&__pyx_moduledef);
#endif
if (unlikely(!__pyx_m)) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
__pyx_d = PyModule_GetDict(__pyx_m); if (unlikely(!__pyx_d)) __PYX_ERR(0, 1, __pyx_L1_error)
Py_INCREF(__pyx_d);
__pyx_b = PyImport_AddModule(__Pyx_BUILTIN_MODULE_NAME); if (unlikely(!__pyx_b)) __PYX_ERR(0, 1, __pyx_L1_error)
Py_INCREF(__pyx_b);
__pyx_cython_runtime = PyImport_AddModule((char *) "cython_runtime"); if (unlikely(!__pyx_cython_runtime)) __PYX_ERR(0, 1, __pyx_L1_error)
Py_INCREF(__pyx_cython_runtime);
if (PyObject_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) __PYX_ERR(0, 1, __pyx_L1_error);
/*--- Initialize various global constants etc. ---*/
if (__Pyx_InitGlobals() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#if PY_MAJOR_VERSION < 3 && (__PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT)
if (__Pyx_init_sys_getdefaultencoding_params() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
if (__pyx_module_is_main_glove__glove_cython) {
if (PyObject_SetAttr(__pyx_m, __pyx_n_s_name_2, __pyx_n_s_main) < 0) __PYX_ERR(0, 1, __pyx_L1_error)
}
#if PY_MAJOR_VERSION >= 3
{
PyObject *modules = PyImport_GetModuleDict(); if (unlikely(!modules)) __PYX_ERR(0, 1, __pyx_L1_error)
if (!PyDict_GetItemString(modules, "glove.glove_cython")) {
if (unlikely(PyDict_SetItemString(modules, "glove.glove_cython", __pyx_m) < 0)) __PYX_ERR(0, 1, __pyx_L1_error)
}
}
#endif
/*--- Builtin init code ---*/
if (__Pyx_InitCachedBuiltins() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
/*--- Constants init code ---*/
if (__Pyx_InitCachedConstants() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
/*--- Global type/function init code ---*/
(void)__Pyx_modinit_global_init_code();
(void)__Pyx_modinit_variable_export_code();
(void)__Pyx_modinit_function_export_code();
if (unlikely(__Pyx_modinit_type_init_code() != 0)) goto __pyx_L1_error;
(void)__Pyx_modinit_type_import_code();
(void)__Pyx_modinit_variable_import_code();
(void)__Pyx_modinit_function_import_code();
/*--- Execution code ---*/
#if defined(__Pyx_Generator_USED) || defined(__Pyx_Coroutine_USED)
if (__Pyx_patch_abc() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
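/* The statements below replay the body of glove/glove_cython.pyx in source
   order: the numpy / scipy.sparse / collections imports, the fit_vectors
   function object, the module's __test__ dict, and the View.MemoryView
   support objects (getbuffer capsules, Enum sentinels, the thread-lock
   pool, and the Enum unpickler). */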
/* "glove/glove_cython.pyx":4
* #cython: boundscheck=False, wraparound=False, cdivision=True, initializedcheck=False
*
* import numpy as np # <<<<<<<<<<<<<<
* import scipy.sparse as sp
* import collections
*/
__pyx_t_1 = __Pyx_Import(__pyx_n_s_numpy, 0, -1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
if (PyDict_SetItem(__pyx_d, __pyx_n_s_np, __pyx_t_1) < 0) __PYX_ERR(0, 4, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "glove/glove_cython.pyx":5
*
* import numpy as np
* import scipy.sparse as sp # <<<<<<<<<<<<<<
* import collections
* from cython.parallel import parallel, prange
*/
__pyx_t_1 = PyList_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 5, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_INCREF(__pyx_n_s__19);
__Pyx_GIVEREF(__pyx_n_s__19);
PyList_SET_ITEM(__pyx_t_1, 0, __pyx_n_s__19);
__pyx_t_2 = __Pyx_Import(__pyx_n_s_scipy_sparse, __pyx_t_1, -1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 5, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
if (PyDict_SetItem(__pyx_d, __pyx_n_s_sp, __pyx_t_2) < 0) __PYX_ERR(0, 5, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
/* "glove/glove_cython.pyx":6
* import numpy as np
* import scipy.sparse as sp
* import collections # <<<<<<<<<<<<<<
* from cython.parallel import parallel, prange
*
*/
__pyx_t_2 = __Pyx_Import(__pyx_n_s_collections, 0, -1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 6, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
if (PyDict_SetItem(__pyx_d, __pyx_n_s_collections, __pyx_t_2) < 0) __PYX_ERR(0, 6, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
/* "glove/glove_cython.pyx":20
*
*
* def fit_vectors(double[:, ::1] wordvec, # <<<<<<<<<<<<<<
* double[:, ::1] wordvec_sum_gradients,
* double[::1] wordbias,
*/
__pyx_t_2 = PyCFunction_NewEx(&__pyx_mdef_5glove_12glove_cython_1fit_vectors, NULL, __pyx_n_s_glove_glove_cython); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 20, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
if (PyDict_SetItem(__pyx_d, __pyx_n_s_fit_vectors, __pyx_t_2) < 0) __PYX_ERR(0, 20, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
/* "glove/glove_cython.pyx":1
* #!python # <<<<<<<<<<<<<<
* #cython: boundscheck=False, wraparound=False, cdivision=True, initializedcheck=False
*
*/
__pyx_t_2 = __Pyx_PyDict_NewPresized(0); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
if (PyDict_SetItem(__pyx_d, __pyx_n_s_test, __pyx_t_2) < 0) __PYX_ERR(0, 1, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
/* "View.MemoryView":209
* info.obj = self
*
* __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)") # <<<<<<<<<<<<<<
*
* def __dealloc__(array self):
*/
__pyx_t_2 = __pyx_capsule_create(((void *)(&__pyx_array_getbuffer)), ((char *)"getbuffer(obj, view, flags)")); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 209, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
if (PyDict_SetItem((PyObject *)__pyx_array_type->tp_dict, __pyx_n_s_pyx_getbuffer, __pyx_t_2) < 0) __PYX_ERR(1, 209, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
PyType_Modified(__pyx_array_type);
/* "View.MemoryView":286
* return self.name
*
* cdef generic = Enum("<strided and direct or indirect>") # <<<<<<<<<<<<<<
* cdef strided = Enum("<strided and direct>") # default
* cdef indirect = Enum("<strided and indirect>")
*/
__pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__22, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 286, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_XGOTREF(generic);
__Pyx_DECREF_SET(generic, __pyx_t_2);
__Pyx_GIVEREF(__pyx_t_2);
__pyx_t_2 = 0;
/* "View.MemoryView":287
*
* cdef generic = Enum("<strided and direct or indirect>")
* cdef strided = Enum("<strided and direct>") # default # <<<<<<<<<<<<<<
* cdef indirect = Enum("<strided and indirect>")
*
*/
__pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__23, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 287, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_XGOTREF(strided);
__Pyx_DECREF_SET(strided, __pyx_t_2);
__Pyx_GIVEREF(__pyx_t_2);
__pyx_t_2 = 0;
/* "View.MemoryView":288
* cdef generic = Enum("<strided and direct or indirect>")
* cdef strided = Enum("<strided and direct>") # default
* cdef indirect = Enum("<strided and indirect>") # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__24, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 288, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_XGOTREF(indirect);
__Pyx_DECREF_SET(indirect, __pyx_t_2);
__Pyx_GIVEREF(__pyx_t_2);
__pyx_t_2 = 0;
/* "View.MemoryView":291
*
*
* cdef contiguous = Enum("<contiguous and direct>") # <<<<<<<<<<<<<<
* cdef indirect_contiguous = Enum("<contiguous and indirect>")
*
*/
__pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__25, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 291, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_XGOTREF(contiguous);
__Pyx_DECREF_SET(contiguous, __pyx_t_2);
__Pyx_GIVEREF(__pyx_t_2);
__pyx_t_2 = 0;
/* "View.MemoryView":292
*
* cdef contiguous = Enum("<contiguous and direct>")
* cdef indirect_contiguous = Enum("<contiguous and indirect>") # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__26, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 292, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_XGOTREF(indirect_contiguous);
__Pyx_DECREF_SET(indirect_contiguous, __pyx_t_2);
__Pyx_GIVEREF(__pyx_t_2);
__pyx_t_2 = 0;
/* "View.MemoryView":316
*
* DEF THREAD_LOCKS_PREALLOCATED = 8
* cdef int __pyx_memoryview_thread_locks_used = 0 # <<<<<<<<<<<<<<
* cdef PyThread_type_lock[THREAD_LOCKS_PREALLOCATED] __pyx_memoryview_thread_locks = [
* PyThread_allocate_lock(),
*/
__pyx_memoryview_thread_locks_used = 0;
/* "View.MemoryView":317
* DEF THREAD_LOCKS_PREALLOCATED = 8
* cdef int __pyx_memoryview_thread_locks_used = 0
* cdef PyThread_type_lock[THREAD_LOCKS_PREALLOCATED] __pyx_memoryview_thread_locks = [ # <<<<<<<<<<<<<<
* PyThread_allocate_lock(),
* PyThread_allocate_lock(),
*/
__pyx_t_3[0] = PyThread_allocate_lock();
__pyx_t_3[1] = PyThread_allocate_lock();
__pyx_t_3[2] = PyThread_allocate_lock();
__pyx_t_3[3] = PyThread_allocate_lock();
__pyx_t_3[4] = PyThread_allocate_lock();
__pyx_t_3[5] = PyThread_allocate_lock();
__pyx_t_3[6] = PyThread_allocate_lock();
__pyx_t_3[7] = PyThread_allocate_lock();
memcpy(&(__pyx_memoryview_thread_locks[0]), __pyx_t_3, sizeof(__pyx_memoryview_thread_locks[0]) * (8));
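/* Pre-fills a fixed pool of 8 thread locks for memoryview objects; the
   locked acquisition-count helpers later in this file use such a lock to
   serialize count updates when atomic increments are not available. */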
/* "View.MemoryView":545
* info.obj = self
*
* __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)") # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_2 = __pyx_capsule_create(((void *)(&__pyx_memoryview_getbuffer)), ((char *)"getbuffer(obj, view, flags)")); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 545, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
if (PyDict_SetItem((PyObject *)__pyx_memoryview_type->tp_dict, __pyx_n_s_pyx_getbuffer, __pyx_t_2) < 0) __PYX_ERR(1, 545, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
PyType_Modified(__pyx_memoryview_type);
/* "View.MemoryView":991
* return self.from_object
*
* __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)") # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_2 = __pyx_capsule_create(((void *)(&__pyx_memoryview_getbuffer)), ((char *)"getbuffer(obj, view, flags)")); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 991, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
if (PyDict_SetItem((PyObject *)__pyx_memoryviewslice_type->tp_dict, __pyx_n_s_pyx_getbuffer, __pyx_t_2) < 0) __PYX_ERR(1, 991, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
PyType_Modified(__pyx_memoryviewslice_type);
/* "(tree fragment)":1
* def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<<
* cdef object __pyx_PickleError
* cdef object __pyx_result
*/
__pyx_t_2 = PyCFunction_NewEx(&__pyx_mdef_15View_dot_MemoryView_1__pyx_unpickle_Enum, NULL, __pyx_n_s_View_MemoryView); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
if (PyDict_SetItem(__pyx_d, __pyx_n_s_pyx_unpickle_Enum, __pyx_t_2) < 0) __PYX_ERR(1, 1, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
/* "(tree fragment)":11
* __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state)
* return __pyx_result
* cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): # <<<<<<<<<<<<<<
* __pyx_result.name = __pyx_state[0]
* if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'):
*/
/*--- Wrapped vars code ---*/
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
if (__pyx_m) {
if (__pyx_d) {
__Pyx_AddTraceback("init glove.glove_cython", __pyx_clineno, __pyx_lineno, __pyx_filename);
}
Py_CLEAR(__pyx_m);
} else if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_ImportError, "init glove.glove_cython");
}
__pyx_L0:;
__Pyx_RefNannyFinishContext();
#if CYTHON_PEP489_MULTI_PHASE_INIT
return (__pyx_m != NULL) ? 0 : -1;
#elif PY_MAJOR_VERSION >= 3
return __pyx_m;
#else
return;
#endif
}
/* --- Runtime support code --- */
/* Refnanny */
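/* RefNanny is Cython's optional reference-count debugging aid: when the
   refnanny extension is importable, each SetupContext/FinishContext pair
   verifies that a function's INCREFs and DECREFs balance.  When
   CYTHON_REFNANNY is disabled, the RefNanny macros expand to nothing. */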
#if CYTHON_REFNANNY
static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname) {
PyObject *m = NULL, *p = NULL;
void *r = NULL;
m = PyImport_ImportModule(modname);
if (!m) goto end;
p = PyObject_GetAttrString(m, "RefNannyAPI");
if (!p) goto end;
r = PyLong_AsVoidPtr(p);
end:
Py_XDECREF(p);
Py_XDECREF(m);
return (__Pyx_RefNannyAPIStruct *)r;
}
#endif
/* PyObjectGetAttrStr */
#if CYTHON_USE_TYPE_SLOTS
static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name) {
PyTypeObject* tp = Py_TYPE(obj);
if (likely(tp->tp_getattro))
return tp->tp_getattro(obj, attr_name);
#if PY_MAJOR_VERSION < 3
if (likely(tp->tp_getattr))
return tp->tp_getattr(obj, PyString_AS_STRING(attr_name));
#endif
return PyObject_GetAttr(obj, attr_name);
}
#endif
/* GetBuiltinName */
static PyObject *__Pyx_GetBuiltinName(PyObject *name) {
PyObject* result = __Pyx_PyObject_GetAttrStr(__pyx_b, name);
if (unlikely(!result)) {
PyErr_Format(PyExc_NameError,
#if PY_MAJOR_VERSION >= 3
"name '%U' is not defined", name);
#else
"name '%.200s' is not defined", PyString_AS_STRING(name));
#endif
}
return result;
}
/* RaiseArgTupleInvalid */
static void __Pyx_RaiseArgtupleInvalid(
const char* func_name,
int exact,
Py_ssize_t num_min,
Py_ssize_t num_max,
Py_ssize_t num_found)
{
Py_ssize_t num_expected;
const char *more_or_less;
if (num_found < num_min) {
num_expected = num_min;
more_or_less = "at least";
} else {
num_expected = num_max;
more_or_less = "at most";
}
if (exact) {
more_or_less = "exactly";
}
PyErr_Format(PyExc_TypeError,
"%.200s() takes %.8s %" CYTHON_FORMAT_SSIZE_T "d positional argument%.1s (%" CYTHON_FORMAT_SSIZE_T "d given)",
func_name, more_or_less, num_expected,
(num_expected == 1) ? "" : "s", num_found);
}
/* RaiseDoubleKeywords */
static void __Pyx_RaiseDoubleKeywordsError(
const char* func_name,
PyObject* kw_name)
{
PyErr_Format(PyExc_TypeError,
#if PY_MAJOR_VERSION >= 3
"%s() got multiple values for keyword argument '%U'", func_name, kw_name);
#else
"%s() got multiple values for keyword argument '%s'", func_name,
PyString_AsString(kw_name));
#endif
}
/* ParseKeywords */
static int __Pyx_ParseOptionalKeywords(
PyObject *kwds,
PyObject **argnames[],
PyObject *kwds2,
PyObject *values[],
Py_ssize_t num_pos_args,
const char* function_name)
{
PyObject *key = 0, *value = 0;
Py_ssize_t pos = 0;
PyObject*** name;
PyObject*** first_kw_arg = argnames + num_pos_args;
while (PyDict_Next(kwds, &pos, &key, &value)) {
name = first_kw_arg;
while (*name && (**name != key)) name++;
if (*name) {
values[name-argnames] = value;
continue;
}
name = first_kw_arg;
#if PY_MAJOR_VERSION < 3
if (likely(PyString_CheckExact(key)) || likely(PyString_Check(key))) {
while (*name) {
if ((CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**name) == PyString_GET_SIZE(key))
&& _PyString_Eq(**name, key)) {
values[name-argnames] = value;
break;
}
name++;
}
if (*name) continue;
else {
PyObject*** argname = argnames;
while (argname != first_kw_arg) {
if ((**argname == key) || (
(CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**argname) == PyString_GET_SIZE(key))
&& _PyString_Eq(**argname, key))) {
goto arg_passed_twice;
}
argname++;
}
}
} else
#endif
if (likely(PyUnicode_Check(key))) {
while (*name) {
int cmp = (**name == key) ? 0 :
#if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3
(PyUnicode_GET_SIZE(**name) != PyUnicode_GET_SIZE(key)) ? 1 :
#endif
PyUnicode_Compare(**name, key);
if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad;
if (cmp == 0) {
values[name-argnames] = value;
break;
}
name++;
}
if (*name) continue;
else {
PyObject*** argname = argnames;
while (argname != first_kw_arg) {
int cmp = (**argname == key) ? 0 :
#if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3
(PyUnicode_GET_SIZE(**argname) != PyUnicode_GET_SIZE(key)) ? 1 :
#endif
PyUnicode_Compare(**argname, key);
if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad;
if (cmp == 0) goto arg_passed_twice;
argname++;
}
}
} else
goto invalid_keyword_type;
if (kwds2) {
if (unlikely(PyDict_SetItem(kwds2, key, value))) goto bad;
} else {
goto invalid_keyword;
}
}
return 0;
arg_passed_twice:
__Pyx_RaiseDoubleKeywordsError(function_name, key);
goto bad;
invalid_keyword_type:
PyErr_Format(PyExc_TypeError,
"%.200s() keywords must be strings", function_name);
goto bad;
invalid_keyword:
PyErr_Format(PyExc_TypeError,
#if PY_MAJOR_VERSION < 3
"%.200s() got an unexpected keyword argument '%.200s'",
function_name, PyString_AsString(key));
#else
"%s() got an unexpected keyword argument '%U'",
function_name, key);
#endif
bad:
return -1;
}
/* MemviewSliceInit */
static int
__Pyx_init_memviewslice(struct __pyx_memoryview_obj *memview,
int ndim,
__Pyx_memviewslice *memviewslice,
int memview_is_new_reference)
{
__Pyx_RefNannyDeclarations
int i, retval=-1;
Py_buffer *buf = &memview->view;
__Pyx_RefNannySetupContext("init_memviewslice", 0);
if (memviewslice->memview || memviewslice->data) {
PyErr_SetString(PyExc_ValueError,
"memviewslice is already initialized!");
goto fail;
}
if (buf->strides) {
for (i = 0; i < ndim; i++) {
memviewslice->strides[i] = buf->strides[i];
}
} else {
Py_ssize_t stride = buf->itemsize;
for (i = ndim - 1; i >= 0; i--) {
memviewslice->strides[i] = stride;
stride *= buf->shape[i];
}
}
for (i = 0; i < ndim; i++) {
memviewslice->shape[i] = buf->shape[i];
if (buf->suboffsets) {
memviewslice->suboffsets[i] = buf->suboffsets[i];
} else {
memviewslice->suboffsets[i] = -1;
}
}
memviewslice->memview = memview;
memviewslice->data = (char *)buf->buf;
if (__pyx_add_acquisition_count(memview) == 0 && !memview_is_new_reference) {
Py_INCREF(memview);
}
retval = 0;
goto no_fail;
fail:
memviewslice->memview = 0;
memviewslice->data = 0;
retval = -1;
no_fail:
__Pyx_RefNannyFinishContext();
return retval;
}
#ifndef Py_NO_RETURN
#define Py_NO_RETURN
#endif
static void __pyx_fatalerror(const char *fmt, ...) Py_NO_RETURN {
va_list vargs;
char msg[200];
#ifdef HAVE_STDARG_PROTOTYPES
va_start(vargs, fmt);
#else
va_start(vargs);
#endif
vsnprintf(msg, 200, fmt, vargs);
va_end(vargs);
Py_FatalError(msg);
}
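/* Lock-based fallbacks for the memoryview acquisition counter: each helper
   takes the per-object lock, post-increments or post-decrements the count,
   and returns the previous value so callers can detect the 0 -> 1 and
   1 -> 0 transitions. */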
static CYTHON_INLINE int
__pyx_add_acquisition_count_locked(__pyx_atomic_int *acquisition_count,
PyThread_type_lock lock)
{
int result;
PyThread_acquire_lock(lock, 1);
result = (*acquisition_count)++;
PyThread_release_lock(lock);
return result;
}
static CYTHON_INLINE int
__pyx_sub_acquisition_count_locked(__pyx_atomic_int *acquisition_count,
PyThread_type_lock lock)
{
int result;
PyThread_acquire_lock(lock, 1);
result = (*acquisition_count)--;
PyThread_release_lock(lock);
return result;
}
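/* __Pyx_INC_MEMVIEW / __Pyx_XDEC_MEMVIEW keep the owning memoryview alive
   exactly while at least one slice references it: the first acquisition
   INCREFs the object, the last release CLEARs it, and both acquire the GIL
   on demand so they can be called from nogil code. */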
static CYTHON_INLINE void
__Pyx_INC_MEMVIEW(__Pyx_memviewslice *memslice, int have_gil, int lineno)
{
int first_time;
struct __pyx_memoryview_obj *memview = memslice->memview;
if (!memview || (PyObject *) memview == Py_None)
return;
if (__pyx_get_slice_count(memview) < 0)
__pyx_fatalerror("Acquisition count is %d (line %d)",
__pyx_get_slice_count(memview), lineno);
first_time = __pyx_add_acquisition_count(memview) == 0;
if (first_time) {
if (have_gil) {
Py_INCREF((PyObject *) memview);
} else {
PyGILState_STATE _gilstate = PyGILState_Ensure();
Py_INCREF((PyObject *) memview);
PyGILState_Release(_gilstate);
}
}
}
static CYTHON_INLINE void __Pyx_XDEC_MEMVIEW(__Pyx_memviewslice *memslice,
int have_gil, int lineno) {
int last_time;
struct __pyx_memoryview_obj *memview = memslice->memview;
if (!memview) {
return;
} else if ((PyObject *) memview == Py_None) {
memslice->memview = NULL;
return;
}
if (__pyx_get_slice_count(memview) <= 0)
__pyx_fatalerror("Acquisition count is %d (line %d)",
__pyx_get_slice_count(memview), lineno);
last_time = __pyx_sub_acquisition_count(memview) == 1;
memslice->data = NULL;
if (last_time) {
if (have_gil) {
Py_CLEAR(memslice->memview);
} else {
PyGILState_STATE _gilstate = PyGILState_Ensure();
Py_CLEAR(memslice->memview);
PyGILState_Release(_gilstate);
}
} else {
memslice->memview = NULL;
}
}
/* ArgTypeTest */
static int __Pyx__ArgTypeTest(PyObject *obj, PyTypeObject *type, const char *name, int exact)
{
if (unlikely(!type)) {
PyErr_SetString(PyExc_SystemError, "Missing type object");
return 0;
}
else if (exact) {
#if PY_MAJOR_VERSION == 2
if ((type == &PyBaseString_Type) && likely(__Pyx_PyBaseString_CheckExact(obj))) return 1;
#endif
}
else {
if (likely(__Pyx_TypeCheck(obj, type))) return 1;
}
PyErr_Format(PyExc_TypeError,
"Argument '%.200s' has incorrect type (expected %.200s, got %.200s)",
name, type->tp_name, Py_TYPE(obj)->tp_name);
return 0;
}
/* PyObjectCall */
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw) {
PyObject *result;
ternaryfunc call = func->ob_type->tp_call;
if (unlikely(!call))
return PyObject_Call(func, arg, kw);
if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object")))
return NULL;
result = (*call)(func, arg, kw);
Py_LeaveRecursiveCall();
if (unlikely(!result) && unlikely(!PyErr_Occurred())) {
PyErr_SetString(
PyExc_SystemError,
"NULL result without error in PyObject_Call");
}
return result;
}
#endif
/* PyErrFetchRestore */
#if CYTHON_FAST_THREAD_STATE
static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) {
PyObject *tmp_type, *tmp_value, *tmp_tb;
tmp_type = tstate->curexc_type;
tmp_value = tstate->curexc_value;
tmp_tb = tstate->curexc_traceback;
tstate->curexc_type = type;
tstate->curexc_value = value;
tstate->curexc_traceback = tb;
Py_XDECREF(tmp_type);
Py_XDECREF(tmp_value);
Py_XDECREF(tmp_tb);
}
static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) {
*type = tstate->curexc_type;
*value = tstate->curexc_value;
*tb = tstate->curexc_traceback;
tstate->curexc_type = 0;
tstate->curexc_value = 0;
tstate->curexc_traceback = 0;
}
#endif
/* RaiseException */
#if PY_MAJOR_VERSION < 3
static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb,
CYTHON_UNUSED PyObject *cause) {
__Pyx_PyThreadState_declare
Py_XINCREF(type);
if (!value || value == Py_None)
value = NULL;
else
Py_INCREF(value);
if (!tb || tb == Py_None)
tb = NULL;
else {
Py_INCREF(tb);
if (!PyTraceBack_Check(tb)) {
PyErr_SetString(PyExc_TypeError,
"raise: arg 3 must be a traceback or None");
goto raise_error;
}
}
if (PyType_Check(type)) {
#if CYTHON_COMPILING_IN_PYPY
if (!value) {
Py_INCREF(Py_None);
value = Py_None;
}
#endif
PyErr_NormalizeException(&type, &value, &tb);
} else {
if (value) {
PyErr_SetString(PyExc_TypeError,
"instance exception may not have a separate value");
goto raise_error;
}
value = type;
type = (PyObject*) Py_TYPE(type);
Py_INCREF(type);
if (!PyType_IsSubtype((PyTypeObject *)type, (PyTypeObject *)PyExc_BaseException)) {
PyErr_SetString(PyExc_TypeError,
"raise: exception class must be a subclass of BaseException");
goto raise_error;
}
}
__Pyx_PyThreadState_assign
__Pyx_ErrRestore(type, value, tb);
return;
raise_error:
Py_XDECREF(value);
Py_XDECREF(type);
Py_XDECREF(tb);
return;
}
#else
static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) {
PyObject* owned_instance = NULL;
if (tb == Py_None) {
tb = 0;
} else if (tb && !PyTraceBack_Check(tb)) {
PyErr_SetString(PyExc_TypeError,
"raise: arg 3 must be a traceback or None");
goto bad;
}
if (value == Py_None)
value = 0;
if (PyExceptionInstance_Check(type)) {
if (value) {
PyErr_SetString(PyExc_TypeError,
"instance exception may not have a separate value");
goto bad;
}
value = type;
type = (PyObject*) Py_TYPE(value);
} else if (PyExceptionClass_Check(type)) {
PyObject *instance_class = NULL;
if (value && PyExceptionInstance_Check(value)) {
instance_class = (PyObject*) Py_TYPE(value);
if (instance_class != type) {
int is_subclass = PyObject_IsSubclass(instance_class, type);
if (!is_subclass) {
instance_class = NULL;
} else if (unlikely(is_subclass == -1)) {
goto bad;
} else {
type = instance_class;
}
}
}
if (!instance_class) {
PyObject *args;
if (!value)
args = PyTuple_New(0);
else if (PyTuple_Check(value)) {
Py_INCREF(value);
args = value;
} else
args = PyTuple_Pack(1, value);
if (!args)
goto bad;
owned_instance = PyObject_Call(type, args, NULL);
Py_DECREF(args);
if (!owned_instance)
goto bad;
value = owned_instance;
if (!PyExceptionInstance_Check(value)) {
PyErr_Format(PyExc_TypeError,
"calling %R should have returned an instance of "
"BaseException, not %R",
type, Py_TYPE(value));
goto bad;
}
}
} else {
PyErr_SetString(PyExc_TypeError,
"raise: exception class must be a subclass of BaseException");
goto bad;
}
if (cause) {
PyObject *fixed_cause;
if (cause == Py_None) {
fixed_cause = NULL;
} else if (PyExceptionClass_Check(cause)) {
fixed_cause = PyObject_CallObject(cause, NULL);
if (fixed_cause == NULL)
goto bad;
} else if (PyExceptionInstance_Check(cause)) {
fixed_cause = cause;
Py_INCREF(fixed_cause);
} else {
PyErr_SetString(PyExc_TypeError,
"exception causes must derive from "
"BaseException");
goto bad;
}
PyException_SetCause(value, fixed_cause);
}
PyErr_SetObject(type, value);
if (tb) {
#if CYTHON_COMPILING_IN_PYPY
PyObject *tmp_type, *tmp_value, *tmp_tb;
PyErr_Fetch(&tmp_type, &tmp_value, &tmp_tb);
Py_INCREF(tb);
PyErr_Restore(tmp_type, tmp_value, tb);
Py_XDECREF(tmp_tb);
#else
PyThreadState *tstate = __Pyx_PyThreadState_Current;
PyObject* tmp_tb = tstate->curexc_traceback;
if (tb != tmp_tb) {
Py_INCREF(tb);
tstate->curexc_traceback = tb;
Py_XDECREF(tmp_tb);
}
#endif
}
bad:
Py_XDECREF(owned_instance);
return;
}
#endif
/* PyCFunctionFastCall */
#if CYTHON_FAST_PYCCALL
static CYTHON_INLINE PyObject * __Pyx_PyCFunction_FastCall(PyObject *func_obj, PyObject **args, Py_ssize_t nargs) {
PyCFunctionObject *func = (PyCFunctionObject*)func_obj;
PyCFunction meth = PyCFunction_GET_FUNCTION(func);
PyObject *self = PyCFunction_GET_SELF(func);
int flags = PyCFunction_GET_FLAGS(func);
assert(PyCFunction_Check(func));
assert(METH_FASTCALL == (flags & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_KEYWORDS | METH_STACKLESS)));
assert(nargs >= 0);
assert(nargs == 0 || args != NULL);
/* _PyCFunction_FastCallDict() must not be called with an exception set,
because it may clear it (directly or indirectly) and so the
caller loses its exception */
assert(!PyErr_Occurred());
if ((PY_VERSION_HEX < 0x030700A0) || unlikely(flags & METH_KEYWORDS)) {
return (*((__Pyx_PyCFunctionFastWithKeywords)(void*)meth)) (self, args, nargs, NULL);
} else {
return (*((__Pyx_PyCFunctionFast)(void*)meth)) (self, args, nargs);
}
}
#endif
/* PyFunctionFastCall */
#if CYTHON_FAST_PYCALL
static PyObject* __Pyx_PyFunction_FastCallNoKw(PyCodeObject *co, PyObject **args, Py_ssize_t na,
PyObject *globals) {
PyFrameObject *f;
PyThreadState *tstate = __Pyx_PyThreadState_Current;
PyObject **fastlocals;
Py_ssize_t i;
PyObject *result;
assert(globals != NULL);
/* XXX Perhaps we should create a specialized
PyFrame_New() that doesn't take locals, but does
take builtins without sanity checking them.
*/
assert(tstate != NULL);
f = PyFrame_New(tstate, co, globals, NULL);
if (f == NULL) {
return NULL;
}
fastlocals = __Pyx_PyFrame_GetLocalsplus(f);
for (i = 0; i < na; i++) {
Py_INCREF(*args);
fastlocals[i] = *args++;
}
result = PyEval_EvalFrameEx(f,0);
++tstate->recursion_depth;
Py_DECREF(f);
--tstate->recursion_depth;
return result;
}
#if 1 || PY_VERSION_HEX < 0x030600B1
static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, int nargs, PyObject *kwargs) {
PyCodeObject *co = (PyCodeObject *)PyFunction_GET_CODE(func);
PyObject *globals = PyFunction_GET_GLOBALS(func);
PyObject *argdefs = PyFunction_GET_DEFAULTS(func);
PyObject *closure;
#if PY_MAJOR_VERSION >= 3
PyObject *kwdefs;
#endif
PyObject *kwtuple, **k;
PyObject **d;
Py_ssize_t nd;
Py_ssize_t nk;
PyObject *result;
assert(kwargs == NULL || PyDict_Check(kwargs));
nk = kwargs ? PyDict_Size(kwargs) : 0;
if (Py_EnterRecursiveCall((char*)" while calling a Python object")) {
return NULL;
}
if (
#if PY_MAJOR_VERSION >= 3
co->co_kwonlyargcount == 0 &&
#endif
likely(kwargs == NULL || nk == 0) &&
co->co_flags == (CO_OPTIMIZED | CO_NEWLOCALS | CO_NOFREE)) {
if (argdefs == NULL && co->co_argcount == nargs) {
result = __Pyx_PyFunction_FastCallNoKw(co, args, nargs, globals);
goto done;
}
else if (nargs == 0 && argdefs != NULL
&& co->co_argcount == Py_SIZE(argdefs)) {
/* function called with no arguments, but all parameters have
a default value: use default values as arguments. */
args = &PyTuple_GET_ITEM(argdefs, 0);
result =__Pyx_PyFunction_FastCallNoKw(co, args, Py_SIZE(argdefs), globals);
goto done;
}
}
if (kwargs != NULL) {
Py_ssize_t pos, i;
kwtuple = PyTuple_New(2 * nk);
if (kwtuple == NULL) {
result = NULL;
goto done;
}
k = &PyTuple_GET_ITEM(kwtuple, 0);
pos = i = 0;
while (PyDict_Next(kwargs, &pos, &k[i], &k[i+1])) {
Py_INCREF(k[i]);
Py_INCREF(k[i+1]);
i += 2;
}
nk = i / 2;
}
else {
kwtuple = NULL;
k = NULL;
}
closure = PyFunction_GET_CLOSURE(func);
#if PY_MAJOR_VERSION >= 3
kwdefs = PyFunction_GET_KW_DEFAULTS(func);
#endif
if (argdefs != NULL) {
d = &PyTuple_GET_ITEM(argdefs, 0);
nd = Py_SIZE(argdefs);
}
else {
d = NULL;
nd = 0;
}
#if PY_MAJOR_VERSION >= 3
result = PyEval_EvalCodeEx((PyObject*)co, globals, (PyObject *)NULL,
args, nargs,
k, (int)nk,
d, (int)nd, kwdefs, closure);
#else
result = PyEval_EvalCodeEx(co, globals, (PyObject *)NULL,
args, nargs,
k, (int)nk,
d, (int)nd, closure);
#endif
Py_XDECREF(kwtuple);
done:
Py_LeaveRecursiveCall();
return result;
}
#endif
#endif
/* PyObjectCall2Args */
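/* Two-argument call helper: tries the fast paths for plain Python
   functions and fastcall-capable C functions before falling back to
   packing a 2-tuple and calling through tp_call. */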
static CYTHON_UNUSED PyObject* __Pyx_PyObject_Call2Args(PyObject* function, PyObject* arg1, PyObject* arg2) {
PyObject *args, *result = NULL;
#if CYTHON_FAST_PYCALL
if (PyFunction_Check(function)) {
PyObject *args[2] = {arg1, arg2};
return __Pyx_PyFunction_FastCall(function, args, 2);
}
#endif
#if CYTHON_FAST_PYCCALL
if (__Pyx_PyFastCFunction_Check(function)) {
PyObject *args[2] = {arg1, arg2};
return __Pyx_PyCFunction_FastCall(function, args, 2);
}
#endif
args = PyTuple_New(2);
if (unlikely(!args)) goto done;
Py_INCREF(arg1);
PyTuple_SET_ITEM(args, 0, arg1);
Py_INCREF(arg2);
PyTuple_SET_ITEM(args, 1, arg2);
Py_INCREF(function);
result = __Pyx_PyObject_Call(function, args, NULL);
Py_DECREF(args);
Py_DECREF(function);
done:
return result;
}
/* PyObjectCallMethO */
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg) {
PyObject *self, *result;
PyCFunction cfunc;
cfunc = PyCFunction_GET_FUNCTION(func);
self = PyCFunction_GET_SELF(func);
if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object")))
return NULL;
result = cfunc(self, arg);
Py_LeaveRecursiveCall();
if (unlikely(!result) && unlikely(!PyErr_Occurred())) {
PyErr_SetString(
PyExc_SystemError,
"NULL result without error in PyObject_Call");
}
return result;
}
#endif
/* PyObjectCallOneArg */
#if CYTHON_COMPILING_IN_CPYTHON
static PyObject* __Pyx__PyObject_CallOneArg(PyObject *func, PyObject *arg) {
PyObject *result;
PyObject *args = PyTuple_New(1);
if (unlikely(!args)) return NULL;
Py_INCREF(arg);
PyTuple_SET_ITEM(args, 0, arg);
result = __Pyx_PyObject_Call(func, args, NULL);
Py_DECREF(args);
return result;
}
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) {
#if CYTHON_FAST_PYCALL
if (PyFunction_Check(func)) {
return __Pyx_PyFunction_FastCall(func, &arg, 1);
}
#endif
if (likely(PyCFunction_Check(func))) {
if (likely(PyCFunction_GET_FLAGS(func) & METH_O)) {
return __Pyx_PyObject_CallMethO(func, arg);
#if CYTHON_FAST_PYCCALL
} else if (PyCFunction_GET_FLAGS(func) & METH_FASTCALL) {
return __Pyx_PyCFunction_FastCall(func, &arg, 1);
#endif
}
}
return __Pyx__PyObject_CallOneArg(func, arg);
}
#else
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) {
PyObject *result;
PyObject *args = PyTuple_Pack(1, arg);
if (unlikely(!args)) return NULL;
result = __Pyx_PyObject_Call(func, args, NULL);
Py_DECREF(args);
return result;
}
#endif
/* BytesEquals */
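/* Equality for exact bytes objects without the generic rich-comparison
   machinery: compare lengths, the first byte and (when available) the
   cached hashes first, and only then memcmp the payloads.  Any other
   operand combination falls back to PyObject_RichCompare. */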
static CYTHON_INLINE int __Pyx_PyBytes_Equals(PyObject* s1, PyObject* s2, int equals) {
#if CYTHON_COMPILING_IN_PYPY
return PyObject_RichCompareBool(s1, s2, equals);
#else
if (s1 == s2) {
return (equals == Py_EQ);
} else if (PyBytes_CheckExact(s1) & PyBytes_CheckExact(s2)) {
const char *ps1, *ps2;
Py_ssize_t length = PyBytes_GET_SIZE(s1);
if (length != PyBytes_GET_SIZE(s2))
return (equals == Py_NE);
ps1 = PyBytes_AS_STRING(s1);
ps2 = PyBytes_AS_STRING(s2);
if (ps1[0] != ps2[0]) {
return (equals == Py_NE);
} else if (length == 1) {
return (equals == Py_EQ);
} else {
int result;
#if CYTHON_USE_UNICODE_INTERNALS
Py_hash_t hash1, hash2;
hash1 = ((PyBytesObject*)s1)->ob_shash;
hash2 = ((PyBytesObject*)s2)->ob_shash;
if (hash1 != hash2 && hash1 != -1 && hash2 != -1) {
return (equals == Py_NE);
}
#endif
result = memcmp(ps1, ps2, (size_t)length);
return (equals == Py_EQ) ? (result == 0) : (result != 0);
}
} else if ((s1 == Py_None) & PyBytes_CheckExact(s2)) {
return (equals == Py_NE);
} else if ((s2 == Py_None) & PyBytes_CheckExact(s1)) {
return (equals == Py_NE);
} else {
int result;
PyObject* py_result = PyObject_RichCompare(s1, s2, equals);
if (!py_result)
return -1;
result = __Pyx_PyObject_IsTrue(py_result);
Py_DECREF(py_result);
return result;
}
#endif
}
/* UnicodeEquals */
static CYTHON_INLINE int __Pyx_PyUnicode_Equals(PyObject* s1, PyObject* s2, int equals) {
#if CYTHON_COMPILING_IN_PYPY
return PyObject_RichCompareBool(s1, s2, equals);
#else
#if PY_MAJOR_VERSION < 3
PyObject* owned_ref = NULL;
#endif
int s1_is_unicode, s2_is_unicode;
if (s1 == s2) {
goto return_eq;
}
s1_is_unicode = PyUnicode_CheckExact(s1);
s2_is_unicode = PyUnicode_CheckExact(s2);
#if PY_MAJOR_VERSION < 3
if ((s1_is_unicode & (!s2_is_unicode)) && PyString_CheckExact(s2)) {
owned_ref = PyUnicode_FromObject(s2);
if (unlikely(!owned_ref))
return -1;
s2 = owned_ref;
s2_is_unicode = 1;
} else if ((s2_is_unicode & (!s1_is_unicode)) && PyString_CheckExact(s1)) {
owned_ref = PyUnicode_FromObject(s1);
if (unlikely(!owned_ref))
return -1;
s1 = owned_ref;
s1_is_unicode = 1;
} else if (((!s2_is_unicode) & (!s1_is_unicode))) {
return __Pyx_PyBytes_Equals(s1, s2, equals);
}
#endif
if (s1_is_unicode & s2_is_unicode) {
Py_ssize_t length;
int kind;
void *data1, *data2;
if (unlikely(__Pyx_PyUnicode_READY(s1) < 0) || unlikely(__Pyx_PyUnicode_READY(s2) < 0))
return -1;
length = __Pyx_PyUnicode_GET_LENGTH(s1);
if (length != __Pyx_PyUnicode_GET_LENGTH(s2)) {
goto return_ne;
}
#if CYTHON_USE_UNICODE_INTERNALS
{
Py_hash_t hash1, hash2;
#if CYTHON_PEP393_ENABLED
hash1 = ((PyASCIIObject*)s1)->hash;
hash2 = ((PyASCIIObject*)s2)->hash;
#else
hash1 = ((PyUnicodeObject*)s1)->hash;
hash2 = ((PyUnicodeObject*)s2)->hash;
#endif
if (hash1 != hash2 && hash1 != -1 && hash2 != -1) {
goto return_ne;
}
}
#endif
kind = __Pyx_PyUnicode_KIND(s1);
if (kind != __Pyx_PyUnicode_KIND(s2)) {
goto return_ne;
}
data1 = __Pyx_PyUnicode_DATA(s1);
data2 = __Pyx_PyUnicode_DATA(s2);
if (__Pyx_PyUnicode_READ(kind, data1, 0) != __Pyx_PyUnicode_READ(kind, data2, 0)) {
goto return_ne;
} else if (length == 1) {
goto return_eq;
} else {
int result = memcmp(data1, data2, (size_t)(length * kind));
#if PY_MAJOR_VERSION < 3
Py_XDECREF(owned_ref);
#endif
return (equals == Py_EQ) ? (result == 0) : (result != 0);
}
} else if ((s1 == Py_None) & s2_is_unicode) {
goto return_ne;
} else if ((s2 == Py_None) & s1_is_unicode) {
goto return_ne;
} else {
int result;
PyObject* py_result = PyObject_RichCompare(s1, s2, equals);
#if PY_MAJOR_VERSION < 3
Py_XDECREF(owned_ref);
#endif
if (!py_result)
return -1;
result = __Pyx_PyObject_IsTrue(py_result);
Py_DECREF(py_result);
return result;
}
return_eq:
#if PY_MAJOR_VERSION < 3
Py_XDECREF(owned_ref);
#endif
return (equals == Py_EQ);
return_ne:
#if PY_MAJOR_VERSION < 3
Py_XDECREF(owned_ref);
#endif
return (equals == Py_NE);
#endif
}
/* GetAttr */
static CYTHON_INLINE PyObject *__Pyx_GetAttr(PyObject *o, PyObject *n) {
#if CYTHON_USE_TYPE_SLOTS
#if PY_MAJOR_VERSION >= 3
if (likely(PyUnicode_Check(n)))
#else
if (likely(PyString_Check(n)))
#endif
return __Pyx_PyObject_GetAttrStr(o, n);
#endif
return PyObject_GetAttr(o, n);
}
/* GetItemInt */
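/* Integer indexing helpers: exact lists and tuples are read through the
   PyList_GET_ITEM/PyTuple_GET_ITEM macros, with wraparound and bounds
   checks resolved at compile time; other sequences go through sq_item, and
   anything else through PyObject_GetItem with a boxed index. */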
static PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j) {
PyObject *r;
if (!j) return NULL;
r = PyObject_GetItem(o, j);
Py_DECREF(j);
return r;
}
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i,
CYTHON_NCP_UNUSED int wraparound,
CYTHON_NCP_UNUSED int boundscheck) {
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
Py_ssize_t wrapped_i = i;
if (wraparound & unlikely(i < 0)) {
wrapped_i += PyList_GET_SIZE(o);
}
if ((!boundscheck) || likely(__Pyx_is_valid_index(wrapped_i, PyList_GET_SIZE(o)))) {
PyObject *r = PyList_GET_ITEM(o, wrapped_i);
Py_INCREF(r);
return r;
}
return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i));
#else
return PySequence_GetItem(o, i);
#endif
}
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i,
CYTHON_NCP_UNUSED int wraparound,
CYTHON_NCP_UNUSED int boundscheck) {
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
Py_ssize_t wrapped_i = i;
if (wraparound & unlikely(i < 0)) {
wrapped_i += PyTuple_GET_SIZE(o);
}
if ((!boundscheck) || likely(__Pyx_is_valid_index(wrapped_i, PyTuple_GET_SIZE(o)))) {
PyObject *r = PyTuple_GET_ITEM(o, wrapped_i);
Py_INCREF(r);
return r;
}
return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i));
#else
return PySequence_GetItem(o, i);
#endif
}
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i, int is_list,
CYTHON_NCP_UNUSED int wraparound,
CYTHON_NCP_UNUSED int boundscheck) {
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS && CYTHON_USE_TYPE_SLOTS
if (is_list || PyList_CheckExact(o)) {
Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? i : i + PyList_GET_SIZE(o);
if ((!boundscheck) || (likely(__Pyx_is_valid_index(n, PyList_GET_SIZE(o))))) {
PyObject *r = PyList_GET_ITEM(o, n);
Py_INCREF(r);
return r;
}
}
else if (PyTuple_CheckExact(o)) {
Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? i : i + PyTuple_GET_SIZE(o);
if ((!boundscheck) || likely(__Pyx_is_valid_index(n, PyTuple_GET_SIZE(o)))) {
PyObject *r = PyTuple_GET_ITEM(o, n);
Py_INCREF(r);
return r;
}
} else {
PySequenceMethods *m = Py_TYPE(o)->tp_as_sequence;
if (likely(m && m->sq_item)) {
if (wraparound && unlikely(i < 0) && likely(m->sq_length)) {
Py_ssize_t l = m->sq_length(o);
if (likely(l >= 0)) {
i += l;
} else {
if (!PyErr_ExceptionMatches(PyExc_OverflowError))
return NULL;
PyErr_Clear();
}
}
return m->sq_item(o, i);
}
}
#else
if (is_list || PySequence_Check(o)) {
return PySequence_GetItem(o, i);
}
#endif
return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i));
}
/* ObjectGetItem */
#if CYTHON_USE_TYPE_SLOTS
static PyObject *__Pyx_PyObject_GetIndex(PyObject *obj, PyObject* index) {
PyObject *runerr;
Py_ssize_t key_value;
PySequenceMethods *m = Py_TYPE(obj)->tp_as_sequence;
if (unlikely(!(m && m->sq_item))) {
PyErr_Format(PyExc_TypeError, "'%.200s' object is not subscriptable", Py_TYPE(obj)->tp_name);
return NULL;
}
key_value = __Pyx_PyIndex_AsSsize_t(index);
if (likely(key_value != -1 || !(runerr = PyErr_Occurred()))) {
return __Pyx_GetItemInt_Fast(obj, key_value, 0, 1, 1);
}
if (PyErr_GivenExceptionMatches(runerr, PyExc_OverflowError)) {
PyErr_Clear();
PyErr_Format(PyExc_IndexError, "cannot fit '%.200s' into an index-sized integer", Py_TYPE(index)->tp_name);
}
return NULL;
}
static PyObject *__Pyx_PyObject_GetItem(PyObject *obj, PyObject* key) {
PyMappingMethods *m = Py_TYPE(obj)->tp_as_mapping;
if (likely(m && m->mp_subscript)) {
return m->mp_subscript(obj, key);
}
return __Pyx_PyObject_GetIndex(obj, key);
}
#endif
/* decode_c_string */
static CYTHON_INLINE PyObject* __Pyx_decode_c_string(
const char* cstring, Py_ssize_t start, Py_ssize_t stop,
const char* encoding, const char* errors,
PyObject* (*decode_func)(const char *s, Py_ssize_t size, const char *errors)) {
Py_ssize_t length;
if (unlikely((start < 0) | (stop < 0))) {
size_t slen = strlen(cstring);
if (unlikely(slen > (size_t) PY_SSIZE_T_MAX)) {
PyErr_SetString(PyExc_OverflowError,
"c-string too long to convert to Python");
return NULL;
}
length = (Py_ssize_t) slen;
if (start < 0) {
start += length;
if (start < 0)
start = 0;
}
if (stop < 0)
stop += length;
}
length = stop - start;
if (unlikely(length <= 0))
return PyUnicode_FromUnicode(NULL, 0);
cstring += start;
if (decode_func) {
return decode_func(cstring, length, errors);
} else {
return PyUnicode_Decode(cstring, length, encoding, errors);
}
}
/* PyErrExceptionMatches */
#if CYTHON_FAST_THREAD_STATE
static int __Pyx_PyErr_ExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) {
Py_ssize_t i, n;
n = PyTuple_GET_SIZE(tuple);
#if PY_MAJOR_VERSION >= 3
for (i=0; i<n; i++) {
if (exc_type == PyTuple_GET_ITEM(tuple, i)) return 1;
}
#endif
for (i=0; i<n; i++) {
if (__Pyx_PyErr_GivenExceptionMatches(exc_type, PyTuple_GET_ITEM(tuple, i))) return 1;
}
return 0;
}
static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err) {
PyObject *exc_type = tstate->curexc_type;
if (exc_type == err) return 1;
if (unlikely(!exc_type)) return 0;
if (unlikely(PyTuple_Check(err)))
return __Pyx_PyErr_ExceptionMatchesTuple(exc_type, err);
return __Pyx_PyErr_GivenExceptionMatches(exc_type, err);
}
#endif
/* GetAttr3 */
static PyObject *__Pyx_GetAttr3Default(PyObject *d) {
__Pyx_PyThreadState_declare
__Pyx_PyThreadState_assign
if (unlikely(!__Pyx_PyErr_ExceptionMatches(PyExc_AttributeError)))
return NULL;
__Pyx_PyErr_Clear();
Py_INCREF(d);
return d;
}
static CYTHON_INLINE PyObject *__Pyx_GetAttr3(PyObject *o, PyObject *n, PyObject *d) {
PyObject *r = __Pyx_GetAttr(o, n);
return (likely(r)) ? r : __Pyx_GetAttr3Default(d);
}
/* PyDictVersioning */
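/* Dict version tags (PEP 509, CPython 3.6+) let cached lookups such as
   GetModuleGlobalName below skip the dictionary probe entirely: a cached
   value remains valid for as long as the recorded version of the module
   dict is unchanged. */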
#if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_TYPE_SLOTS
static CYTHON_INLINE PY_UINT64_T __Pyx_get_tp_dict_version(PyObject *obj) {
PyObject *dict = Py_TYPE(obj)->tp_dict;
return likely(dict) ? __PYX_GET_DICT_VERSION(dict) : 0;
}
static CYTHON_INLINE PY_UINT64_T __Pyx_get_object_dict_version(PyObject *obj) {
PyObject **dictptr = NULL;
Py_ssize_t offset = Py_TYPE(obj)->tp_dictoffset;
if (offset) {
#if CYTHON_COMPILING_IN_CPYTHON
dictptr = (likely(offset > 0)) ? (PyObject **) ((char *)obj + offset) : _PyObject_GetDictPtr(obj);
#else
dictptr = _PyObject_GetDictPtr(obj);
#endif
}
return (dictptr && *dictptr) ? __PYX_GET_DICT_VERSION(*dictptr) : 0;
}
static CYTHON_INLINE int __Pyx_object_dict_version_matches(PyObject* obj, PY_UINT64_T tp_dict_version, PY_UINT64_T obj_dict_version) {
PyObject *dict = Py_TYPE(obj)->tp_dict;
if (unlikely(!dict) || unlikely(tp_dict_version != __PYX_GET_DICT_VERSION(dict)))
return 0;
return obj_dict_version == __Pyx_get_object_dict_version(obj);
}
#endif
/* GetModuleGlobalName */
#if CYTHON_USE_DICT_VERSIONS
static PyObject *__Pyx__GetModuleGlobalName(PyObject *name, PY_UINT64_T *dict_version, PyObject **dict_cached_value)
#else
static CYTHON_INLINE PyObject *__Pyx__GetModuleGlobalName(PyObject *name)
#endif
{
PyObject *result;
#if !CYTHON_AVOID_BORROWED_REFS
#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1
result = _PyDict_GetItem_KnownHash(__pyx_d, name, ((PyASCIIObject *) name)->hash);
__PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version)
if (likely(result)) {
return __Pyx_NewRef(result);
} else if (unlikely(PyErr_Occurred())) {
return NULL;
}
#else
result = PyDict_GetItem(__pyx_d, name);
__PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version)
if (likely(result)) {
return __Pyx_NewRef(result);
}
#endif
#else
result = PyObject_GetItem(__pyx_d, name);
__PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version)
if (likely(result)) {
return __Pyx_NewRef(result);
}
PyErr_Clear();
#endif
return __Pyx_GetBuiltinName(name);
}
/* RaiseTooManyValuesToUnpack */
static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected) {
PyErr_Format(PyExc_ValueError,
"too many values to unpack (expected %" CYTHON_FORMAT_SSIZE_T "d)", expected);
}
/* RaiseNeedMoreValuesToUnpack */
static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index) {
PyErr_Format(PyExc_ValueError,
"need more than %" CYTHON_FORMAT_SSIZE_T "d value%.1s to unpack",
index, (index == 1) ? "" : "s");
}
/* RaiseNoneIterError */
static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void) {
PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable");
}
/* ExtTypeTest */
static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type) {
if (unlikely(!type)) {
PyErr_SetString(PyExc_SystemError, "Missing type object");
return 0;
}
if (likely(__Pyx_TypeCheck(obj, type)))
return 1;
PyErr_Format(PyExc_TypeError, "Cannot convert %.200s to %.200s",
Py_TYPE(obj)->tp_name, type->tp_name);
return 0;
}
/* GetTopmostException */
#if CYTHON_USE_EXC_INFO_STACK
static _PyErr_StackItem *
__Pyx_PyErr_GetTopmostException(PyThreadState *tstate)
{
_PyErr_StackItem *exc_info = tstate->exc_info;
while ((exc_info->exc_type == NULL || exc_info->exc_type == Py_None) &&
exc_info->previous_item != NULL)
{
exc_info = exc_info->previous_item;
}
return exc_info;
}
#endif
/* SaveResetException */
#if CYTHON_FAST_THREAD_STATE
static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) {
#if CYTHON_USE_EXC_INFO_STACK
_PyErr_StackItem *exc_info = __Pyx_PyErr_GetTopmostException(tstate);
*type = exc_info->exc_type;
*value = exc_info->exc_value;
*tb = exc_info->exc_traceback;
#else
*type = tstate->exc_type;
*value = tstate->exc_value;
*tb = tstate->exc_traceback;
#endif
Py_XINCREF(*type);
Py_XINCREF(*value);
Py_XINCREF(*tb);
}
static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) {
PyObject *tmp_type, *tmp_value, *tmp_tb;
#if CYTHON_USE_EXC_INFO_STACK
_PyErr_StackItem *exc_info = tstate->exc_info;
tmp_type = exc_info->exc_type;
tmp_value = exc_info->exc_value;
tmp_tb = exc_info->exc_traceback;
exc_info->exc_type = type;
exc_info->exc_value = value;
exc_info->exc_traceback = tb;
#else
tmp_type = tstate->exc_type;
tmp_value = tstate->exc_value;
tmp_tb = tstate->exc_traceback;
tstate->exc_type = type;
tstate->exc_value = value;
tstate->exc_traceback = tb;
#endif
Py_XDECREF(tmp_type);
Py_XDECREF(tmp_value);
Py_XDECREF(tmp_tb);
}
#endif
/* GetException */
#if CYTHON_FAST_THREAD_STATE
static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb)
#else
static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb)
#endif
{
PyObject *local_type, *local_value, *local_tb;
#if CYTHON_FAST_THREAD_STATE
PyObject *tmp_type, *tmp_value, *tmp_tb;
local_type = tstate->curexc_type;
local_value = tstate->curexc_value;
local_tb = tstate->curexc_traceback;
tstate->curexc_type = 0;
tstate->curexc_value = 0;
tstate->curexc_traceback = 0;
#else
PyErr_Fetch(&local_type, &local_value, &local_tb);
#endif
PyErr_NormalizeException(&local_type, &local_value, &local_tb);
#if CYTHON_FAST_THREAD_STATE
if (unlikely(tstate->curexc_type))
#else
if (unlikely(PyErr_Occurred()))
#endif
goto bad;
#if PY_MAJOR_VERSION >= 3
if (local_tb) {
if (unlikely(PyException_SetTraceback(local_value, local_tb) < 0))
goto bad;
}
#endif
Py_XINCREF(local_tb);
Py_XINCREF(local_type);
Py_XINCREF(local_value);
*type = local_type;
*value = local_value;
*tb = local_tb;
#if CYTHON_FAST_THREAD_STATE
#if CYTHON_USE_EXC_INFO_STACK
{
_PyErr_StackItem *exc_info = tstate->exc_info;
tmp_type = exc_info->exc_type;
tmp_value = exc_info->exc_value;
tmp_tb = exc_info->exc_traceback;
exc_info->exc_type = local_type;
exc_info->exc_value = local_value;
exc_info->exc_traceback = local_tb;
}
#else
tmp_type = tstate->exc_type;
tmp_value = tstate->exc_value;
tmp_tb = tstate->exc_traceback;
tstate->exc_type = local_type;
tstate->exc_value = local_value;
tstate->exc_traceback = local_tb;
#endif
Py_XDECREF(tmp_type);
Py_XDECREF(tmp_value);
Py_XDECREF(tmp_tb);
#else
PyErr_SetExcInfo(local_type, local_value, local_tb);
#endif
return 0;
bad:
*type = 0;
*value = 0;
*tb = 0;
Py_XDECREF(local_type);
Py_XDECREF(local_value);
Py_XDECREF(local_tb);
return -1;
}
/* SwapException */
#if CYTHON_FAST_THREAD_STATE
static CYTHON_INLINE void __Pyx__ExceptionSwap(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) {
PyObject *tmp_type, *tmp_value, *tmp_tb;
#if CYTHON_USE_EXC_INFO_STACK
_PyErr_StackItem *exc_info = tstate->exc_info;
tmp_type = exc_info->exc_type;
tmp_value = exc_info->exc_value;
tmp_tb = exc_info->exc_traceback;
exc_info->exc_type = *type;
exc_info->exc_value = *value;
exc_info->exc_traceback = *tb;
#else
tmp_type = tstate->exc_type;
tmp_value = tstate->exc_value;
tmp_tb = tstate->exc_traceback;
tstate->exc_type = *type;
tstate->exc_value = *value;
tstate->exc_traceback = *tb;
#endif
*type = tmp_type;
*value = tmp_value;
*tb = tmp_tb;
}
#else
static CYTHON_INLINE void __Pyx_ExceptionSwap(PyObject **type, PyObject **value, PyObject **tb) {
PyObject *tmp_type, *tmp_value, *tmp_tb;
PyErr_GetExcInfo(&tmp_type, &tmp_value, &tmp_tb);
PyErr_SetExcInfo(*type, *value, *tb);
*type = tmp_type;
*value = tmp_value;
*tb = tmp_tb;
}
#endif
/* Import */
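/* Emulates an import statement compiled at level -1: inside a package it
   first attempts a relative import and, if that fails with ImportError,
   retries as an absolute one.  On Python 2 the call is routed through the
   __import__ builtin so that import hooks keep working. */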
static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level) {
PyObject *empty_list = 0;
PyObject *module = 0;
PyObject *global_dict = 0;
PyObject *empty_dict = 0;
PyObject *list;
#if PY_MAJOR_VERSION < 3
PyObject *py_import;
py_import = __Pyx_PyObject_GetAttrStr(__pyx_b, __pyx_n_s_import);
if (!py_import)
goto bad;
#endif
if (from_list)
list = from_list;
else {
empty_list = PyList_New(0);
if (!empty_list)
goto bad;
list = empty_list;
}
global_dict = PyModule_GetDict(__pyx_m);
if (!global_dict)
goto bad;
empty_dict = PyDict_New();
if (!empty_dict)
goto bad;
{
#if PY_MAJOR_VERSION >= 3
if (level == -1) {
if (strchr(__Pyx_MODULE_NAME, '.')) {
module = PyImport_ImportModuleLevelObject(
name, global_dict, empty_dict, list, 1);
if (!module) {
if (!PyErr_ExceptionMatches(PyExc_ImportError))
goto bad;
PyErr_Clear();
}
}
level = 0;
}
#endif
if (!module) {
#if PY_MAJOR_VERSION < 3
PyObject *py_level = PyInt_FromLong(level);
if (!py_level)
goto bad;
module = PyObject_CallFunctionObjArgs(py_import,
name, global_dict, empty_dict, list, py_level, (PyObject *)NULL);
Py_DECREF(py_level);
#else
module = PyImport_ImportModuleLevelObject(
name, global_dict, empty_dict, list, level);
#endif
}
}
bad:
#if PY_MAJOR_VERSION < 3
Py_XDECREF(py_import);
#endif
Py_XDECREF(empty_list);
Py_XDECREF(empty_dict);
return module;
}
/* FastTypeChecks */
#if CYTHON_COMPILING_IN_CPYTHON
static int __Pyx_InBases(PyTypeObject *a, PyTypeObject *b) {
while (a) {
a = a->tp_base;
if (a == b)
return 1;
}
return b == &PyBaseObject_Type;
}
static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b) {
PyObject *mro;
if (a == b) return 1;
mro = a->tp_mro;
if (likely(mro)) {
Py_ssize_t i, n;
n = PyTuple_GET_SIZE(mro);
for (i = 0; i < n; i++) {
if (PyTuple_GET_ITEM(mro, i) == (PyObject *)b)
return 1;
}
return 0;
}
return __Pyx_InBases(a, b);
}
#if PY_MAJOR_VERSION == 2
static int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject* exc_type2) {
PyObject *exception, *value, *tb;
int res;
__Pyx_PyThreadState_declare
__Pyx_PyThreadState_assign
__Pyx_ErrFetch(&exception, &value, &tb);
res = exc_type1 ? PyObject_IsSubclass(err, exc_type1) : 0;
if (unlikely(res == -1)) {
PyErr_WriteUnraisable(err);
res = 0;
}
if (!res) {
res = PyObject_IsSubclass(err, exc_type2);
if (unlikely(res == -1)) {
PyErr_WriteUnraisable(err);
res = 0;
}
}
__Pyx_ErrRestore(exception, value, tb);
return res;
}
#else
static CYTHON_INLINE int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject *exc_type2) {
int res = exc_type1 ? __Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type1) : 0;
if (!res) {
res = __Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type2);
}
return res;
}
#endif
static int __Pyx_PyErr_GivenExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) {
Py_ssize_t i, n;
assert(PyExceptionClass_Check(exc_type));
n = PyTuple_GET_SIZE(tuple);
#if PY_MAJOR_VERSION >= 3
for (i=0; i<n; i++) {
if (exc_type == PyTuple_GET_ITEM(tuple, i)) return 1;
}
#endif
for (i=0; i<n; i++) {
PyObject *t = PyTuple_GET_ITEM(tuple, i);
#if PY_MAJOR_VERSION < 3
if (likely(exc_type == t)) return 1;
#endif
if (likely(PyExceptionClass_Check(t))) {
if (__Pyx_inner_PyErr_GivenExceptionMatches2(exc_type, NULL, t)) return 1;
} else {
}
}
return 0;
}
static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject* exc_type) {
if (likely(err == exc_type)) return 1;
if (likely(PyExceptionClass_Check(err))) {
if (likely(PyExceptionClass_Check(exc_type))) {
return __Pyx_inner_PyErr_GivenExceptionMatches2(err, NULL, exc_type);
} else if (likely(PyTuple_Check(exc_type))) {
return __Pyx_PyErr_GivenExceptionMatchesTuple(err, exc_type);
} else {
}
}
return PyErr_GivenExceptionMatches(err, exc_type);
}
static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *exc_type1, PyObject *exc_type2) {
assert(PyExceptionClass_Check(exc_type1));
assert(PyExceptionClass_Check(exc_type2));
if (likely(err == exc_type1 || err == exc_type2)) return 1;
if (likely(PyExceptionClass_Check(err))) {
return __Pyx_inner_PyErr_GivenExceptionMatches2(err, exc_type1, exc_type2);
}
return (PyErr_GivenExceptionMatches(err, exc_type1) || PyErr_GivenExceptionMatches(err, exc_type2));
}
#endif
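/* Note: on CPython the matchers above walk tp_base / tp_mro directly rather
than calling PyObject_IsSubclass, which avoids dispatching a potential
__subclasscheck__ on the hot exception-handling path; the tuple variant
first tries cheap pointer equality before falling back to subtype tests. */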
/* PyIntBinop */
#if !CYTHON_COMPILING_IN_PYPY
static PyObject* __Pyx_PyInt_AddObjC(PyObject *op1, PyObject *op2, CYTHON_UNUSED long intval, int inplace, int zerodivision_check) {
(void)inplace;
(void)zerodivision_check;
#if PY_MAJOR_VERSION < 3
if (likely(PyInt_CheckExact(op1))) {
const long b = intval;
long x;
long a = PyInt_AS_LONG(op1);
x = (long)((unsigned long)a + b);
if (likely((x^a) >= 0 || (x^b) >= 0))
return PyInt_FromLong(x);
return PyLong_Type.tp_as_number->nb_add(op1, op2);
}
#endif
#if CYTHON_USE_PYLONG_INTERNALS
if (likely(PyLong_CheckExact(op1))) {
const long b = intval;
long a, x;
#ifdef HAVE_LONG_LONG
const PY_LONG_LONG llb = intval;
PY_LONG_LONG lla, llx;
#endif
const digit* digits = ((PyLongObject*)op1)->ob_digit;
const Py_ssize_t size = Py_SIZE(op1);
if (likely(__Pyx_sst_abs(size) <= 1)) {
a = likely(size) ? digits[0] : 0;
if (size == -1) a = -a;
} else {
switch (size) {
case -2:
if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {
a = -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]));
break;
#ifdef HAVE_LONG_LONG
} else if (8 * sizeof(PY_LONG_LONG) - 1 > 2 * PyLong_SHIFT) {
lla = -(PY_LONG_LONG) (((((unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0]));
goto long_long;
#endif
}
CYTHON_FALLTHROUGH;
case 2:
if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {
a = (long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]));
break;
#ifdef HAVE_LONG_LONG
} else if (8 * sizeof(PY_LONG_LONG) - 1 > 2 * PyLong_SHIFT) {
lla = (PY_LONG_LONG) (((((unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0]));
goto long_long;
#endif
}
CYTHON_FALLTHROUGH;
case -3:
if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {
a = -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]));
break;
#ifdef HAVE_LONG_LONG
} else if (8 * sizeof(PY_LONG_LONG) - 1 > 3 * PyLong_SHIFT) {
lla = -(PY_LONG_LONG) (((((((unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0]));
goto long_long;
#endif
}
CYTHON_FALLTHROUGH;
case 3:
if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {
a = (long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]));
break;
#ifdef HAVE_LONG_LONG
} else if (8 * sizeof(PY_LONG_LONG) - 1 > 3 * PyLong_SHIFT) {
lla = (PY_LONG_LONG) (((((((unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0]));
goto long_long;
#endif
}
CYTHON_FALLTHROUGH;
case -4:
if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) {
a = -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]));
break;
#ifdef HAVE_LONG_LONG
} else if (8 * sizeof(PY_LONG_LONG) - 1 > 4 * PyLong_SHIFT) {
lla = -(PY_LONG_LONG) (((((((((unsigned PY_LONG_LONG)digits[3]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0]));
goto long_long;
#endif
}
CYTHON_FALLTHROUGH;
case 4:
if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) {
a = (long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]));
break;
#ifdef HAVE_LONG_LONG
} else if (8 * sizeof(PY_LONG_LONG) - 1 > 4 * PyLong_SHIFT) {
lla = (PY_LONG_LONG) (((((((((unsigned PY_LONG_LONG)digits[3]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0]));
goto long_long;
#endif
}
CYTHON_FALLTHROUGH;
default: return PyLong_Type.tp_as_number->nb_add(op1, op2);
}
}
x = a + b;
return PyLong_FromLong(x);
#ifdef HAVE_LONG_LONG
long_long:
llx = lla + llb;
return PyLong_FromLongLong(llx);
#endif
}
#endif
if (PyFloat_CheckExact(op1)) {
const long b = intval;
double a = PyFloat_AS_DOUBLE(op1);
double result;
PyFPE_START_PROTECT("add", return NULL)
result = ((double)a) + (double)b;
PyFPE_END_PROTECT(result)
return PyFloat_FromDouble(result);
}
return (inplace ? PyNumber_InPlaceAdd : PyNumber_Add)(op1, op2);
}
#endif
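/* A minimal sketch (not part of the generated module) of the overflow test
used in __Pyx_PyInt_AddObjC above: the sum is computed in unsigned
arithmetic so wraparound is well defined, and it overflowed exactly when
the wrapped result differs in sign from both operands. */
#if 0
#include <limits.h>
static long example_checked_add(long a, long b, int *overflowed)
{
    long x = (long)((unsigned long)a + (unsigned long)b); /* addition wraps, no UB */
    *overflowed = !((x ^ a) >= 0 || (x ^ b) >= 0);        /* same test as above */
    return x;
}
/* example_checked_add(LONG_MAX, 1, &flag) sets flag: the wrapped sum is
negative while both operands are positive. */
#endif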
/* RaiseUnboundLocalError */
static CYTHON_INLINE void __Pyx_RaiseUnboundLocalError(const char *varname) {
PyErr_Format(PyExc_UnboundLocalError, "local variable '%s' referenced before assignment", varname);
}
/* WriteUnraisableException */
static void __Pyx_WriteUnraisable(const char *name, CYTHON_UNUSED int clineno,
CYTHON_UNUSED int lineno, CYTHON_UNUSED const char *filename,
int full_traceback, CYTHON_UNUSED int nogil) {
PyObject *old_exc, *old_val, *old_tb;
PyObject *ctx;
__Pyx_PyThreadState_declare
#ifdef WITH_THREAD
PyGILState_STATE state;
if (nogil)
state = PyGILState_Ensure();
#ifdef _MSC_VER
else state = (PyGILState_STATE)-1;
#endif
#endif
__Pyx_PyThreadState_assign
__Pyx_ErrFetch(&old_exc, &old_val, &old_tb);
if (full_traceback) {
Py_XINCREF(old_exc);
Py_XINCREF(old_val);
Py_XINCREF(old_tb);
__Pyx_ErrRestore(old_exc, old_val, old_tb);
PyErr_PrintEx(1);
}
#if PY_MAJOR_VERSION < 3
ctx = PyString_FromString(name);
#else
ctx = PyUnicode_FromString(name);
#endif
__Pyx_ErrRestore(old_exc, old_val, old_tb);
if (!ctx) {
PyErr_WriteUnraisable(Py_None);
} else {
PyErr_WriteUnraisable(ctx);
Py_DECREF(ctx);
}
#ifdef WITH_THREAD
if (nogil)
PyGILState_Release(state);
#endif
}
/* ImportFrom */
static PyObject* __Pyx_ImportFrom(PyObject* module, PyObject* name) {
PyObject* value = __Pyx_PyObject_GetAttrStr(module, name);
if (unlikely(!value) && PyErr_ExceptionMatches(PyExc_AttributeError)) {
PyErr_Format(PyExc_ImportError,
#if PY_MAJOR_VERSION < 3
"cannot import name %.230s", PyString_AS_STRING(name));
#else
"cannot import name %S", name);
#endif
}
return value;
}
/* HasAttr */
static CYTHON_INLINE int __Pyx_HasAttr(PyObject *o, PyObject *n) {
PyObject *r;
if (unlikely(!__Pyx_PyBaseString_Check(n))) {
PyErr_SetString(PyExc_TypeError,
"hasattr(): attribute name must be string");
return -1;
}
r = __Pyx_GetAttr(o, n);
if (unlikely(!r)) {
PyErr_Clear();
return 0;
} else {
Py_DECREF(r);
return 1;
}
}
/* PyObject_GenericGetAttrNoDict */
#if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000
static PyObject *__Pyx_RaiseGenericGetAttributeError(PyTypeObject *tp, PyObject *attr_name) {
PyErr_Format(PyExc_AttributeError,
#if PY_MAJOR_VERSION >= 3
"'%.50s' object has no attribute '%U'",
tp->tp_name, attr_name);
#else
"'%.50s' object has no attribute '%.400s'",
tp->tp_name, PyString_AS_STRING(attr_name));
#endif
return NULL;
}
static CYTHON_INLINE PyObject* __Pyx_PyObject_GenericGetAttrNoDict(PyObject* obj, PyObject* attr_name) {
PyObject *descr;
PyTypeObject *tp = Py_TYPE(obj);
if (unlikely(!PyString_Check(attr_name))) {
return PyObject_GenericGetAttr(obj, attr_name);
}
assert(!tp->tp_dictoffset);
descr = _PyType_Lookup(tp, attr_name);
if (unlikely(!descr)) {
return __Pyx_RaiseGenericGetAttributeError(tp, attr_name);
}
Py_INCREF(descr);
#if PY_MAJOR_VERSION < 3
if (likely(PyType_HasFeature(Py_TYPE(descr), Py_TPFLAGS_HAVE_CLASS)))
#endif
{
descrgetfunc f = Py_TYPE(descr)->tp_descr_get;
if (unlikely(f)) {
PyObject *res = f(descr, obj, (PyObject *)tp);
Py_DECREF(descr);
return res;
}
}
return descr;
}
#endif
/* PyObject_GenericGetAttr */
#if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000
static PyObject* __Pyx_PyObject_GenericGetAttr(PyObject* obj, PyObject* attr_name) {
if (unlikely(Py_TYPE(obj)->tp_dictoffset)) {
return PyObject_GenericGetAttr(obj, attr_name);
}
return __Pyx_PyObject_GenericGetAttrNoDict(obj, attr_name);
}
#endif
/* SetVTable */
static int __Pyx_SetVtable(PyObject *dict, void *vtable) {
#if PY_VERSION_HEX >= 0x02070000
PyObject *ob = PyCapsule_New(vtable, 0, 0);
#else
PyObject *ob = PyCObject_FromVoidPtr(vtable, 0);
#endif
if (!ob)
goto bad;
if (PyDict_SetItem(dict, __pyx_n_s_pyx_vtable, ob) < 0)
goto bad;
Py_DECREF(ob);
return 0;
bad:
Py_XDECREF(ob);
return -1;
}
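/* A minimal sketch (an assumption, not the generated counterpart) of how a
vtable stored by __Pyx_SetVtable is read back: look the capsule up in the
type dict and unwrap it with the same NULL capsule name it was stored under.
The literal key "__pyx_vtable__" stands in for __pyx_n_s_pyx_vtable. */
#if 0
static void *example_get_vtable(PyObject *dict)
{
    PyObject *ob = PyDict_GetItemString(dict, "__pyx_vtable__"); /* borrowed ref */
    return ob ? PyCapsule_GetPointer(ob, 0) : NULL; /* name 0 matches PyCapsule_New(p, 0, 0) */
}
#endif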
/* SetupReduce */
static int __Pyx_setup_reduce_is_named(PyObject* meth, PyObject* name) {
int ret;
PyObject *name_attr;
name_attr = __Pyx_PyObject_GetAttrStr(meth, __pyx_n_s_name_2);
if (likely(name_attr)) {
ret = PyObject_RichCompareBool(name_attr, name, Py_EQ);
} else {
ret = -1;
}
if (unlikely(ret < 0)) {
PyErr_Clear();
ret = 0;
}
Py_XDECREF(name_attr);
return ret;
}
static int __Pyx_setup_reduce(PyObject* type_obj) {
int ret = 0;
PyObject *object_reduce = NULL;
PyObject *object_reduce_ex = NULL;
PyObject *reduce = NULL;
PyObject *reduce_ex = NULL;
PyObject *reduce_cython = NULL;
PyObject *setstate = NULL;
PyObject *setstate_cython = NULL;
#if CYTHON_USE_PYTYPE_LOOKUP
if (_PyType_Lookup((PyTypeObject*)type_obj, __pyx_n_s_getstate)) goto GOOD;
#else
if (PyObject_HasAttr(type_obj, __pyx_n_s_getstate)) goto GOOD;
#endif
#if CYTHON_USE_PYTYPE_LOOKUP
object_reduce_ex = _PyType_Lookup(&PyBaseObject_Type, __pyx_n_s_reduce_ex); if (!object_reduce_ex) goto BAD;
#else
object_reduce_ex = __Pyx_PyObject_GetAttrStr((PyObject*)&PyBaseObject_Type, __pyx_n_s_reduce_ex); if (!object_reduce_ex) goto BAD;
#endif
reduce_ex = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_reduce_ex); if (unlikely(!reduce_ex)) goto BAD;
if (reduce_ex == object_reduce_ex) {
#if CYTHON_USE_PYTYPE_LOOKUP
object_reduce = _PyType_Lookup(&PyBaseObject_Type, __pyx_n_s_reduce); if (!object_reduce) goto BAD;
#else
object_reduce = __Pyx_PyObject_GetAttrStr((PyObject*)&PyBaseObject_Type, __pyx_n_s_reduce); if (!object_reduce) goto BAD;
#endif
reduce = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_reduce); if (unlikely(!reduce)) goto BAD;
if (reduce == object_reduce || __Pyx_setup_reduce_is_named(reduce, __pyx_n_s_reduce_cython)) {
reduce_cython = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_reduce_cython); if (unlikely(!reduce_cython)) goto BAD;
ret = PyDict_SetItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_reduce, reduce_cython); if (unlikely(ret < 0)) goto BAD;
ret = PyDict_DelItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_reduce_cython); if (unlikely(ret < 0)) goto BAD;
setstate = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_setstate);
if (!setstate) PyErr_Clear();
if (!setstate || __Pyx_setup_reduce_is_named(setstate, __pyx_n_s_setstate_cython)) {
setstate_cython = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_setstate_cython); if (unlikely(!setstate_cython)) goto BAD;
ret = PyDict_SetItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_setstate, setstate_cython); if (unlikely(ret < 0)) goto BAD;
ret = PyDict_DelItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_setstate_cython); if (unlikely(ret < 0)) goto BAD;
}
PyType_Modified((PyTypeObject*)type_obj);
}
}
goto GOOD;
BAD:
if (!PyErr_Occurred())
PyErr_Format(PyExc_RuntimeError, "Unable to initialize pickling for %s", ((PyTypeObject*)type_obj)->tp_name);
ret = -1;
GOOD:
#if !CYTHON_USE_PYTYPE_LOOKUP
Py_XDECREF(object_reduce);
Py_XDECREF(object_reduce_ex);
#endif
Py_XDECREF(reduce);
Py_XDECREF(reduce_ex);
Py_XDECREF(reduce_cython);
Py_XDECREF(setstate);
Py_XDECREF(setstate_cython);
return ret;
}
/* CLineInTraceback */
#ifndef CYTHON_CLINE_IN_TRACEBACK
static int __Pyx_CLineForTraceback(PyThreadState *tstate, int c_line) {
PyObject *use_cline;
PyObject *ptype, *pvalue, *ptraceback;
#if CYTHON_COMPILING_IN_CPYTHON
PyObject **cython_runtime_dict;
#endif
if (unlikely(!__pyx_cython_runtime)) {
return c_line;
}
__Pyx_ErrFetchInState(tstate, &ptype, &pvalue, &ptraceback);
#if CYTHON_COMPILING_IN_CPYTHON
cython_runtime_dict = _PyObject_GetDictPtr(__pyx_cython_runtime);
if (likely(cython_runtime_dict)) {
__PYX_PY_DICT_LOOKUP_IF_MODIFIED(
use_cline, *cython_runtime_dict,
__Pyx_PyDict_GetItemStr(*cython_runtime_dict, __pyx_n_s_cline_in_traceback))
} else
#endif
{
PyObject *use_cline_obj = __Pyx_PyObject_GetAttrStr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback);
if (use_cline_obj) {
use_cline = PyObject_Not(use_cline_obj) ? Py_False : Py_True;
Py_DECREF(use_cline_obj);
} else {
PyErr_Clear();
use_cline = NULL;
}
}
if (!use_cline) {
c_line = 0;
PyObject_SetAttr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback, Py_False);
}
else if (use_cline == Py_False || (use_cline != Py_True && PyObject_Not(use_cline) != 0)) {
c_line = 0;
}
__Pyx_ErrRestoreInState(tstate, ptype, pvalue, ptraceback);
return c_line;
}
#endif
/* CodeObjectCache */
static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line) {
int start = 0, mid = 0, end = count - 1;
if (end >= 0 && code_line > entries[end].code_line) {
return count;
}
while (start < end) {
mid = start + (end - start) / 2;
if (code_line < entries[mid].code_line) {
end = mid;
} else if (code_line > entries[mid].code_line) {
start = mid + 1;
} else {
return mid;
}
}
if (code_line <= entries[mid].code_line) {
return mid;
} else {
return mid + 1;
}
}
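/* Illustrative self-check (a sketch, not part of the generated module): the
bisect above is a lower-bound style search that returns either the matching
slot or the insertion point that keeps the cache sorted by code_line. */
#if 0
#include <assert.h>
static void example_bisect_selfcheck(void)
{
    /* designated initializers, since the entry's field order is not shown here */
    __Pyx_CodeObjectCacheEntry e[3] = {
        {.code_line = 10, .code_object = NULL},
        {.code_line = 20, .code_object = NULL},
        {.code_line = 30, .code_object = NULL}
    };
    assert(__pyx_bisect_code_objects(e, 3, 20) == 1); /* exact hit */
    assert(__pyx_bisect_code_objects(e, 3, 25) == 2); /* insertion point before 30 */
    assert(__pyx_bisect_code_objects(e, 3, 35) == 3); /* == count, append position */
}
#endif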
static PyCodeObject *__pyx_find_code_object(int code_line) {
PyCodeObject* code_object;
int pos;
if (unlikely(!code_line) || unlikely(!__pyx_code_cache.entries)) {
return NULL;
}
pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line);
if (unlikely(pos >= __pyx_code_cache.count) || unlikely(__pyx_code_cache.entries[pos].code_line != code_line)) {
return NULL;
}
code_object = __pyx_code_cache.entries[pos].code_object;
Py_INCREF(code_object);
return code_object;
}
static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object) {
int pos, i;
__Pyx_CodeObjectCacheEntry* entries = __pyx_code_cache.entries;
if (unlikely(!code_line)) {
return;
}
if (unlikely(!entries)) {
entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Malloc(64*sizeof(__Pyx_CodeObjectCacheEntry));
if (likely(entries)) {
__pyx_code_cache.entries = entries;
__pyx_code_cache.max_count = 64;
__pyx_code_cache.count = 1;
entries[0].code_line = code_line;
entries[0].code_object = code_object;
Py_INCREF(code_object);
}
return;
}
pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line);
if ((pos < __pyx_code_cache.count) && unlikely(__pyx_code_cache.entries[pos].code_line == code_line)) {
PyCodeObject* tmp = entries[pos].code_object;
entries[pos].code_object = code_object;
Py_DECREF(tmp);
return;
}
if (__pyx_code_cache.count == __pyx_code_cache.max_count) {
int new_max = __pyx_code_cache.max_count + 64;
entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Realloc(
__pyx_code_cache.entries, (size_t)new_max*sizeof(__Pyx_CodeObjectCacheEntry));
if (unlikely(!entries)) {
return;
}
__pyx_code_cache.entries = entries;
__pyx_code_cache.max_count = new_max;
}
for (i=__pyx_code_cache.count; i>pos; i--) {
entries[i] = entries[i-1];
}
entries[pos].code_line = code_line;
entries[pos].code_object = code_object;
__pyx_code_cache.count++;
Py_INCREF(code_object);
}
/* AddTraceback */
#include "compile.h"
#include "frameobject.h"
#include "traceback.h"
static PyCodeObject* __Pyx_CreateCodeObjectForTraceback(
const char *funcname, int c_line,
int py_line, const char *filename) {
PyCodeObject *py_code = 0;
PyObject *py_srcfile = 0;
PyObject *py_funcname = 0;
#if PY_MAJOR_VERSION < 3
py_srcfile = PyString_FromString(filename);
#else
py_srcfile = PyUnicode_FromString(filename);
#endif
if (!py_srcfile) goto bad;
if (c_line) {
#if PY_MAJOR_VERSION < 3
py_funcname = PyString_FromFormat("%s (%s:%d)", funcname, __pyx_cfilenm, c_line);
#else
py_funcname = PyUnicode_FromFormat("%s (%s:%d)", funcname, __pyx_cfilenm, c_line);
#endif
}
else {
#if PY_MAJOR_VERSION < 3
py_funcname = PyString_FromString(funcname);
#else
py_funcname = PyUnicode_FromString(funcname);
#endif
}
if (!py_funcname) goto bad;
py_code = __Pyx_PyCode_New(
0,
0,
0,
0,
0,
__pyx_empty_bytes, /*PyObject *code,*/
__pyx_empty_tuple, /*PyObject *consts,*/
__pyx_empty_tuple, /*PyObject *names,*/
__pyx_empty_tuple, /*PyObject *varnames,*/
__pyx_empty_tuple, /*PyObject *freevars,*/
__pyx_empty_tuple, /*PyObject *cellvars,*/
py_srcfile, /*PyObject *filename,*/
py_funcname, /*PyObject *name,*/
py_line,
__pyx_empty_bytes /*PyObject *lnotab*/
);
Py_DECREF(py_srcfile);
Py_DECREF(py_funcname);
return py_code;
bad:
Py_XDECREF(py_srcfile);
Py_XDECREF(py_funcname);
return NULL;
}
static void __Pyx_AddTraceback(const char *funcname, int c_line,
int py_line, const char *filename) {
PyCodeObject *py_code = 0;
PyFrameObject *py_frame = 0;
PyThreadState *tstate = __Pyx_PyThreadState_Current;
if (c_line) {
c_line = __Pyx_CLineForTraceback(tstate, c_line);
}
py_code = __pyx_find_code_object(c_line ? -c_line : py_line);
if (!py_code) {
py_code = __Pyx_CreateCodeObjectForTraceback(
funcname, c_line, py_line, filename);
if (!py_code) goto bad;
__pyx_insert_code_object(c_line ? -c_line : py_line, py_code);
}
py_frame = PyFrame_New(
tstate, /*PyThreadState *tstate,*/
py_code, /*PyCodeObject *code,*/
__pyx_d, /*PyObject *globals,*/
0 /*PyObject *locals*/
);
if (!py_frame) goto bad;
__Pyx_PyFrame_SetLineNumber(py_frame, py_line);
PyTraceBack_Here(py_frame);
bad:
Py_XDECREF(py_code);
Py_XDECREF(py_frame);
}
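/* Note: C-line entries are cached under the negated line number (c_line ?
-c_line : py_line), so a C line and a Python line with the same numeric
value occupy distinct slots in the shared code-object cache above. */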
#if PY_MAJOR_VERSION < 3
static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags) {
if (PyObject_CheckBuffer(obj)) return PyObject_GetBuffer(obj, view, flags);
if (__Pyx_TypeCheck(obj, __pyx_array_type)) return __pyx_array_getbuffer(obj, view, flags);
if (__Pyx_TypeCheck(obj, __pyx_memoryview_type)) return __pyx_memoryview_getbuffer(obj, view, flags);
PyErr_Format(PyExc_TypeError, "'%.200s' does not have the buffer interface", Py_TYPE(obj)->tp_name);
return -1;
}
static void __Pyx_ReleaseBuffer(Py_buffer *view) {
PyObject *obj = view->obj;
if (!obj) return;
if (PyObject_CheckBuffer(obj)) {
PyBuffer_Release(view);
return;
}
if ((0)) {}
view->obj = NULL;
Py_DECREF(obj);
}
#endif
/* MemviewSliceIsContig */
static int
__pyx_memviewslice_is_contig(const __Pyx_memviewslice mvs, char order, int ndim)
{
int i, index, step, start;
Py_ssize_t itemsize = mvs.memview->view.itemsize;
if (order == 'F') {
step = 1;
start = 0;
} else {
step = -1;
start = ndim - 1;
}
for (i = 0; i < ndim; i++) {
index = start + step * i;
if (mvs.suboffsets[index] >= 0 || mvs.strides[index] != itemsize)
return 0;
itemsize *= mvs.shape[index];
}
return 1;
}
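/* Note: the walk above verifies the strides of an ideally packed layout:
for order 'C' it starts at the last dimension, requiring stride == itemsize
there, stride == itemsize * shape[ndim-1] one dimension out, and so on;
order 'F' runs the same running product from the first dimension outward. */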
/* OverlappingSlices */
static void
__pyx_get_array_memory_extents(__Pyx_memviewslice *slice,
void **out_start, void **out_end,
int ndim, size_t itemsize)
{
char *start, *end;
int i;
start = end = slice->data;
for (i = 0; i < ndim; i++) {
Py_ssize_t stride = slice->strides[i];
Py_ssize_t extent = slice->shape[i];
if (extent == 0) {
*out_start = *out_end = start;
return;
} else {
if (stride > 0)
end += stride * (extent - 1);
else
start += stride * (extent - 1);
}
}
*out_start = start;
*out_end = end + itemsize;
}
static int
__pyx_slices_overlap(__Pyx_memviewslice *slice1,
__Pyx_memviewslice *slice2,
int ndim, size_t itemsize)
{
void *start1, *end1, *start2, *end2;
__pyx_get_array_memory_extents(slice1, &start1, &end1, ndim, itemsize);
__pyx_get_array_memory_extents(slice2, &start2, &end2, ndim, itemsize);
return (start1 < end2) && (start2 < end1);
}
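/* Note: the extents are half-open byte ranges [start, end), so two slices
intersect exactly when start1 < end2 && start2 < end1; e.g. [0,8) and
[4,12) overlap, while [0,8) and [8,16) merely touch and are disjoint. */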
/* Capsule */
static CYTHON_INLINE PyObject *
__pyx_capsule_create(void *p, CYTHON_UNUSED const char *sig)
{
PyObject *cobj;
#if PY_VERSION_HEX >= 0x02070000
cobj = PyCapsule_New(p, sig, NULL);
#else
cobj = PyCObject_FromVoidPtr(p, NULL);
#endif
return cobj;
}
/* IsLittleEndian */
static CYTHON_INLINE int __Pyx_Is_Little_Endian(void)
{
union {
uint32_t u32;
uint8_t u8[4];
} S;
S.u32 = 0x01020304;
return S.u8[0] == 4;
}
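/* Note: with S.u32 = 0x01020304 a little-endian machine stores the least
significant byte first, so S.u8[0] reads back 0x04; on a big-endian machine
S.u8[0] would be 0x01 and the function returns 0. */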
/* BufferFormatCheck */
static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx,
__Pyx_BufFmt_StackElem* stack,
__Pyx_TypeInfo* type) {
stack[0].field = &ctx->root;
stack[0].parent_offset = 0;
ctx->root.type = type;
ctx->root.name = "buffer dtype";
ctx->root.offset = 0;
ctx->head = stack;
ctx->head->field = &ctx->root;
ctx->fmt_offset = 0;
ctx->head->parent_offset = 0;
ctx->new_packmode = '@';
ctx->enc_packmode = '@';
ctx->new_count = 1;
ctx->enc_count = 0;
ctx->enc_type = 0;
ctx->is_complex = 0;
ctx->is_valid_array = 0;
ctx->struct_alignment = 0;
while (type->typegroup == 'S') {
++ctx->head;
ctx->head->field = type->fields;
ctx->head->parent_offset = 0;
type = type->fields->type;
}
}
static int __Pyx_BufFmt_ParseNumber(const char** ts) {
int count;
const char* t = *ts;
if (*t < '0' || *t > '9') {
return -1;
} else {
count = *t++ - '0';
while (*t >= '0' && *t <= '9') {
count *= 10;
count += *t++ - '0';
}
}
*ts = t;
return count;
}
static int __Pyx_BufFmt_ExpectNumber(const char **ts) {
int number = __Pyx_BufFmt_ParseNumber(ts);
if (number == -1)
PyErr_Format(PyExc_ValueError,
"Does not understand character buffer dtype format string ('%c')", **ts);
return number;
}
static void __Pyx_BufFmt_RaiseUnexpectedChar(char ch) {
PyErr_Format(PyExc_ValueError,
"Unexpected format string character: '%c'", ch);
}
static const char* __Pyx_BufFmt_DescribeTypeChar(char ch, int is_complex) {
switch (ch) {
case 'c': return "'char'";
case 'b': return "'signed char'";
case 'B': return "'unsigned char'";
case 'h': return "'short'";
case 'H': return "'unsigned short'";
case 'i': return "'int'";
case 'I': return "'unsigned int'";
case 'l': return "'long'";
case 'L': return "'unsigned long'";
case 'q': return "'long long'";
case 'Q': return "'unsigned long long'";
case 'f': return (is_complex ? "'complex float'" : "'float'");
case 'd': return (is_complex ? "'complex double'" : "'double'");
case 'g': return (is_complex ? "'complex long double'" : "'long double'");
case 'T': return "a struct";
case 'O': return "Python object";
case 'P': return "a pointer";
case 's': case 'p': return "a string";
case 0: return "end";
default: return "unparseable format string";
}
}
static size_t __Pyx_BufFmt_TypeCharToStandardSize(char ch, int is_complex) {
switch (ch) {
case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1;
case 'h': case 'H': return 2;
case 'i': case 'I': case 'l': case 'L': return 4;
case 'q': case 'Q': return 8;
case 'f': return (is_complex ? 8 : 4);
case 'd': return (is_complex ? 16 : 8);
case 'g': {
PyErr_SetString(PyExc_ValueError, "Python does not define a standard format string size for long double ('g')..");
return 0;
}
case 'O': case 'P': return sizeof(void*);
default:
__Pyx_BufFmt_RaiseUnexpectedChar(ch);
return 0;
}
}
static size_t __Pyx_BufFmt_TypeCharToNativeSize(char ch, int is_complex) {
switch (ch) {
case 'c': case 'b': case 'B': case 's': case 'p': return 1;
case 'h': case 'H': return sizeof(short);
case 'i': case 'I': return sizeof(int);
case 'l': case 'L': return sizeof(long);
#ifdef HAVE_LONG_LONG
case 'q': case 'Q': return sizeof(PY_LONG_LONG);
#endif
case 'f': return sizeof(float) * (is_complex ? 2 : 1);
case 'd': return sizeof(double) * (is_complex ? 2 : 1);
case 'g': return sizeof(long double) * (is_complex ? 2 : 1);
case 'O': case 'P': return sizeof(void*);
default: {
__Pyx_BufFmt_RaiseUnexpectedChar(ch);
return 0;
}
}
}
typedef struct { char c; short x; } __Pyx_st_short;
typedef struct { char c; int x; } __Pyx_st_int;
typedef struct { char c; long x; } __Pyx_st_long;
typedef struct { char c; float x; } __Pyx_st_float;
typedef struct { char c; double x; } __Pyx_st_double;
typedef struct { char c; long double x; } __Pyx_st_longdouble;
typedef struct { char c; void *x; } __Pyx_st_void_p;
#ifdef HAVE_LONG_LONG
typedef struct { char c; PY_LONG_LONG x; } __Pyx_st_longlong;
#endif
static size_t __Pyx_BufFmt_TypeCharToAlignment(char ch, CYTHON_UNUSED int is_complex) {
switch (ch) {
case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1;
case 'h': case 'H': return sizeof(__Pyx_st_short) - sizeof(short);
case 'i': case 'I': return sizeof(__Pyx_st_int) - sizeof(int);
case 'l': case 'L': return sizeof(__Pyx_st_long) - sizeof(long);
#ifdef HAVE_LONG_LONG
case 'q': case 'Q': return sizeof(__Pyx_st_longlong) - sizeof(PY_LONG_LONG);
#endif
case 'f': return sizeof(__Pyx_st_float) - sizeof(float);
case 'd': return sizeof(__Pyx_st_double) - sizeof(double);
case 'g': return sizeof(__Pyx_st_longdouble) - sizeof(long double);
case 'P': case 'O': return sizeof(__Pyx_st_void_p) - sizeof(void*);
default:
__Pyx_BufFmt_RaiseUnexpectedChar(ch);
return 0;
}
}
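/* Worked example (typical LP64 x86-64 ABI, an assumption): in
__Pyx_st_double the double member must start on an 8-byte boundary after
the leading char, so sizeof(__Pyx_st_double) is 16 and the difference
16 - 8 = 8 recovers alignof(double). */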
/* These are for computing the padding at the end of the struct to align
on the first member of the struct. This will probably be the same as above,
but we don't have any guarantees.
*/
typedef struct { short x; char c; } __Pyx_pad_short;
typedef struct { int x; char c; } __Pyx_pad_int;
typedef struct { long x; char c; } __Pyx_pad_long;
typedef struct { float x; char c; } __Pyx_pad_float;
typedef struct { double x; char c; } __Pyx_pad_double;
typedef struct { long double x; char c; } __Pyx_pad_longdouble;
typedef struct { void *x; char c; } __Pyx_pad_void_p;
#ifdef HAVE_LONG_LONG
typedef struct { PY_LONG_LONG x; char c; } __Pyx_pad_longlong;
#endif
static size_t __Pyx_BufFmt_TypeCharToPadding(char ch, CYTHON_UNUSED int is_complex) {
switch (ch) {
case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1;
case 'h': case 'H': return sizeof(__Pyx_pad_short) - sizeof(short);
case 'i': case 'I': return sizeof(__Pyx_pad_int) - sizeof(int);
case 'l': case 'L': return sizeof(__Pyx_pad_long) - sizeof(long);
#ifdef HAVE_LONG_LONG
case 'q': case 'Q': return sizeof(__Pyx_pad_longlong) - sizeof(PY_LONG_LONG);
#endif
case 'f': return sizeof(__Pyx_pad_float) - sizeof(float);
case 'd': return sizeof(__Pyx_pad_double) - sizeof(double);
case 'g': return sizeof(__Pyx_pad_longdouble) - sizeof(long double);
case 'P': case 'O': return sizeof(__Pyx_pad_void_p) - sizeof(void*);
default:
__Pyx_BufFmt_RaiseUnexpectedChar(ch);
return 0;
}
}
static char __Pyx_BufFmt_TypeCharToGroup(char ch, int is_complex) {
switch (ch) {
case 'c':
return 'H';
case 'b': case 'h': case 'i':
case 'l': case 'q': case 's': case 'p':
return 'I';
case 'B': case 'H': case 'I': case 'L': case 'Q':
return 'U';
case 'f': case 'd': case 'g':
return (is_complex ? 'C' : 'R');
case 'O':
return 'O';
case 'P':
return 'P';
default: {
__Pyx_BufFmt_RaiseUnexpectedChar(ch);
return 0;
}
}
}
static void __Pyx_BufFmt_RaiseExpected(__Pyx_BufFmt_Context* ctx) {
if (ctx->head == NULL || ctx->head->field == &ctx->root) {
const char* expected;
const char* quote;
if (ctx->head == NULL) {
expected = "end";
quote = "";
} else {
expected = ctx->head->field->type->name;
quote = "'";
}
PyErr_Format(PyExc_ValueError,
"Buffer dtype mismatch, expected %s%s%s but got %s",
quote, expected, quote,
__Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex));
} else {
__Pyx_StructField* field = ctx->head->field;
__Pyx_StructField* parent = (ctx->head - 1)->field;
PyErr_Format(PyExc_ValueError,
"Buffer dtype mismatch, expected '%s' but got %s in '%s.%s'",
field->type->name, __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex),
parent->type->name, field->name);
}
}
static int __Pyx_BufFmt_ProcessTypeChunk(__Pyx_BufFmt_Context* ctx) {
char group;
size_t size, offset, arraysize = 1;
if (ctx->enc_type == 0) return 0;
if (ctx->head->field->type->arraysize[0]) {
int i, ndim = 0;
if (ctx->enc_type == 's' || ctx->enc_type == 'p') {
ctx->is_valid_array = ctx->head->field->type->ndim == 1;
ndim = 1;
if (ctx->enc_count != ctx->head->field->type->arraysize[0]) {
PyErr_Format(PyExc_ValueError,
"Expected a dimension of size %zu, got %zu",
ctx->head->field->type->arraysize[0], ctx->enc_count);
return -1;
}
}
if (!ctx->is_valid_array) {
PyErr_Format(PyExc_ValueError, "Expected %d dimensions, got %d",
ctx->head->field->type->ndim, ndim);
return -1;
}
for (i = 0; i < ctx->head->field->type->ndim; i++) {
arraysize *= ctx->head->field->type->arraysize[i];
}
ctx->is_valid_array = 0;
ctx->enc_count = 1;
}
group = __Pyx_BufFmt_TypeCharToGroup(ctx->enc_type, ctx->is_complex);
do {
__Pyx_StructField* field = ctx->head->field;
__Pyx_TypeInfo* type = field->type;
if (ctx->enc_packmode == '@' || ctx->enc_packmode == '^') {
size = __Pyx_BufFmt_TypeCharToNativeSize(ctx->enc_type, ctx->is_complex);
} else {
size = __Pyx_BufFmt_TypeCharToStandardSize(ctx->enc_type, ctx->is_complex);
}
if (ctx->enc_packmode == '@') {
size_t align_at = __Pyx_BufFmt_TypeCharToAlignment(ctx->enc_type, ctx->is_complex);
size_t align_mod_offset;
if (align_at == 0) return -1;
align_mod_offset = ctx->fmt_offset % align_at;
if (align_mod_offset > 0) ctx->fmt_offset += align_at - align_mod_offset;
if (ctx->struct_alignment == 0)
ctx->struct_alignment = __Pyx_BufFmt_TypeCharToPadding(ctx->enc_type,
ctx->is_complex);
}
if (type->size != size || type->typegroup != group) {
if (type->typegroup == 'C' && type->fields != NULL) {
size_t parent_offset = ctx->head->parent_offset + field->offset;
++ctx->head;
ctx->head->field = type->fields;
ctx->head->parent_offset = parent_offset;
continue;
}
if ((type->typegroup == 'H' || group == 'H') && type->size == size) {
} else {
__Pyx_BufFmt_RaiseExpected(ctx);
return -1;
}
}
offset = ctx->head->parent_offset + field->offset;
if (ctx->fmt_offset != offset) {
PyErr_Format(PyExc_ValueError,
"Buffer dtype mismatch; next field is at offset %" CYTHON_FORMAT_SSIZE_T "d but %" CYTHON_FORMAT_SSIZE_T "d expected",
(Py_ssize_t)ctx->fmt_offset, (Py_ssize_t)offset);
return -1;
}
ctx->fmt_offset += size;
if (arraysize)
ctx->fmt_offset += (arraysize - 1) * size;
--ctx->enc_count;
while (1) {
if (field == &ctx->root) {
ctx->head = NULL;
if (ctx->enc_count != 0) {
__Pyx_BufFmt_RaiseExpected(ctx);
return -1;
}
break;
}
ctx->head->field = ++field;
if (field->type == NULL) {
--ctx->head;
field = ctx->head->field;
continue;
} else if (field->type->typegroup == 'S') {
size_t parent_offset = ctx->head->parent_offset + field->offset;
if (field->type->fields->type == NULL) continue;
field = field->type->fields;
++ctx->head;
ctx->head->field = field;
ctx->head->parent_offset = parent_offset;
break;
} else {
break;
}
}
} while (ctx->enc_count);
ctx->enc_type = 0;
ctx->is_complex = 0;
return 0;
}
static PyObject *
__pyx_buffmt_parse_array(__Pyx_BufFmt_Context* ctx, const char** tsp)
{
const char *ts = *tsp;
int i = 0, number;
int ndim = ctx->head->field->type->ndim;
++ts;
if (ctx->new_count != 1) {
PyErr_SetString(PyExc_ValueError,
"Cannot handle repeated arrays in format string");
return NULL;
}
if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
while (*ts && *ts != ')') {
switch (*ts) {
case ' ': case '\f': case '\r': case '\n': case '\t': case '\v': ++ts; continue; /* advance past whitespace; a bare `continue` would loop forever */
default: break;
}
number = __Pyx_BufFmt_ExpectNumber(&ts);
if (number == -1) return NULL;
if (i < ndim && (size_t) number != ctx->head->field->type->arraysize[i])
return PyErr_Format(PyExc_ValueError,
"Expected a dimension of size %zu, got %d",
ctx->head->field->type->arraysize[i], number);
if (*ts != ',' && *ts != ')')
return PyErr_Format(PyExc_ValueError,
"Expected a comma in format string, got '%c'", *ts);
if (*ts == ',') ts++;
i++;
}
if (i != ndim)
return PyErr_Format(PyExc_ValueError, "Expected %d dimension(s), got %d",
ctx->head->field->type->ndim, i);
if (!*ts) {
PyErr_SetString(PyExc_ValueError,
"Unexpected end of format string, expected ')'");
return NULL;
}
ctx->is_valid_array = 1;
ctx->new_count = 1;
*tsp = ++ts;
return Py_None;
}
static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts) {
int got_Z = 0;
while (1) {
switch(*ts) {
case 0:
if (ctx->enc_type != 0 && ctx->head == NULL) {
__Pyx_BufFmt_RaiseExpected(ctx);
return NULL;
}
if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
if (ctx->head != NULL) {
__Pyx_BufFmt_RaiseExpected(ctx);
return NULL;
}
return ts;
case ' ':
case '\r':
case '\n':
++ts;
break;
case '<':
if (!__Pyx_Is_Little_Endian()) {
PyErr_SetString(PyExc_ValueError, "Little-endian buffer not supported on big-endian compiler");
return NULL;
}
ctx->new_packmode = '=';
++ts;
break;
case '>':
case '!':
if (__Pyx_Is_Little_Endian()) {
PyErr_SetString(PyExc_ValueError, "Big-endian buffer not supported on little-endian compiler");
return NULL;
}
ctx->new_packmode = '=';
++ts;
break;
case '=':
case '@':
case '^':
ctx->new_packmode = *ts++;
break;
case 'T':
{
const char* ts_after_sub;
size_t i, struct_count = ctx->new_count;
size_t struct_alignment = ctx->struct_alignment;
ctx->new_count = 1;
++ts;
if (*ts != '{') {
PyErr_SetString(PyExc_ValueError, "Buffer acquisition: Expected '{' after 'T'");
return NULL;
}
if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
ctx->enc_type = 0;
ctx->enc_count = 0;
ctx->struct_alignment = 0;
++ts;
ts_after_sub = ts;
for (i = 0; i != struct_count; ++i) {
ts_after_sub = __Pyx_BufFmt_CheckString(ctx, ts);
if (!ts_after_sub) return NULL;
}
ts = ts_after_sub;
if (struct_alignment) ctx->struct_alignment = struct_alignment;
}
break;
case '}':
{
size_t alignment = ctx->struct_alignment;
++ts;
if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
ctx->enc_type = 0;
if (alignment && ctx->fmt_offset % alignment) {
ctx->fmt_offset += alignment - (ctx->fmt_offset % alignment);
}
}
return ts;
case 'x':
if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
ctx->fmt_offset += ctx->new_count;
ctx->new_count = 1;
ctx->enc_count = 0;
ctx->enc_type = 0;
ctx->enc_packmode = ctx->new_packmode;
++ts;
break;
case 'Z':
got_Z = 1;
++ts;
if (*ts != 'f' && *ts != 'd' && *ts != 'g') {
__Pyx_BufFmt_RaiseUnexpectedChar('Z');
return NULL;
}
CYTHON_FALLTHROUGH;
case 'c': case 'b': case 'B': case 'h': case 'H': case 'i': case 'I':
case 'l': case 'L': case 'q': case 'Q':
case 'f': case 'd': case 'g':
case 'O': case 'p':
if (ctx->enc_type == *ts && got_Z == ctx->is_complex &&
ctx->enc_packmode == ctx->new_packmode) {
ctx->enc_count += ctx->new_count;
ctx->new_count = 1;
got_Z = 0;
++ts;
break;
}
CYTHON_FALLTHROUGH;
case 's':
if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
ctx->enc_count = ctx->new_count;
ctx->enc_packmode = ctx->new_packmode;
ctx->enc_type = *ts;
ctx->is_complex = got_Z;
++ts;
ctx->new_count = 1;
got_Z = 0;
break;
case ':':
++ts;
while(*ts != ':') ++ts;
++ts;
break;
case '(':
if (!__pyx_buffmt_parse_array(ctx, &ts)) return NULL;
break;
default:
{
int number = __Pyx_BufFmt_ExpectNumber(&ts);
if (number == -1) return NULL;
ctx->new_count = (size_t)number;
}
}
}
}
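/* Illustrative walk-through (assumed example struct): for a buffer of
struct { double x; int y; } NumPy emits the format string "T{d:x:i:y:}".
__Pyx_BufFmt_CheckString consumes 'T' and '{', recurses over the members,
and __Pyx_BufFmt_ProcessTypeChunk places 'd' at fmt_offset 0 and 'i' at 8
under '@' native alignment; the closing '}' then pads fmt_offset from 12
up to 16, a multiple of the strictest member alignment. */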
/* TypeInfoCompare */
static int
__pyx_typeinfo_cmp(__Pyx_TypeInfo *a, __Pyx_TypeInfo *b)
{
int i;
if (!a || !b)
return 0;
if (a == b)
return 1;
if (a->size != b->size || a->typegroup != b->typegroup ||
a->is_unsigned != b->is_unsigned || a->ndim != b->ndim) {
if (a->typegroup == 'H' || b->typegroup == 'H') {
return a->size == b->size;
} else {
return 0;
}
}
if (a->ndim) {
for (i = 0; i < a->ndim; i++)
if (a->arraysize[i] != b->arraysize[i])
return 0;
}
if (a->typegroup == 'S') {
if (a->flags != b->flags)
return 0;
if (a->fields || b->fields) {
if (!(a->fields && b->fields))
return 0;
for (i = 0; a->fields[i].type && b->fields[i].type; i++) {
__Pyx_StructField *field_a = a->fields + i;
__Pyx_StructField *field_b = b->fields + i;
if (field_a->offset != field_b->offset ||
!__pyx_typeinfo_cmp(field_a->type, field_b->type))
return 0;
}
return !a->fields[i].type && !b->fields[i].type;
}
}
return 1;
}
/* MemviewSliceValidateAndInit */
static int
__pyx_check_strides(Py_buffer *buf, int dim, int ndim, int spec)
{
if (buf->shape[dim] <= 1)
return 1;
if (buf->strides) {
if (spec & __Pyx_MEMVIEW_CONTIG) {
if (spec & (__Pyx_MEMVIEW_PTR|__Pyx_MEMVIEW_FULL)) {
if (buf->strides[dim] != sizeof(void *)) {
PyErr_Format(PyExc_ValueError,
"Buffer is not indirectly contiguous "
"in dimension %d.", dim);
goto fail;
}
} else if (buf->strides[dim] != buf->itemsize) {
PyErr_SetString(PyExc_ValueError,
"Buffer and memoryview are not contiguous "
"in the same dimension.");
goto fail;
}
}
if (spec & __Pyx_MEMVIEW_FOLLOW) {
Py_ssize_t stride = buf->strides[dim];
if (stride < 0)
stride = -stride;
if (stride < buf->itemsize) {
PyErr_SetString(PyExc_ValueError,
"Buffer and memoryview are not contiguous "
"in the same dimension.");
goto fail;
}
}
} else {
if (spec & __Pyx_MEMVIEW_CONTIG && dim != ndim - 1) {
PyErr_Format(PyExc_ValueError,
"C-contiguous buffer is not contiguous in "
"dimension %d", dim);
goto fail;
} else if (spec & (__Pyx_MEMVIEW_PTR)) {
PyErr_Format(PyExc_ValueError,
"C-contiguous buffer is not indirect in "
"dimension %d", dim);
goto fail;
} else if (buf->suboffsets) {
PyErr_SetString(PyExc_ValueError,
"Buffer exposes suboffsets but no strides");
goto fail;
}
}
return 1;
fail:
return 0;
}
static int
__pyx_check_suboffsets(Py_buffer *buf, int dim, CYTHON_UNUSED int ndim, int spec)
{
if (spec & __Pyx_MEMVIEW_DIRECT) {
if (buf->suboffsets && buf->suboffsets[dim] >= 0) {
PyErr_Format(PyExc_ValueError,
"Buffer not compatible with direct access "
"in dimension %d.", dim);
goto fail;
}
}
if (spec & __Pyx_MEMVIEW_PTR) {
if (!buf->suboffsets || (buf->suboffsets[dim] < 0)) {
PyErr_Format(PyExc_ValueError,
"Buffer is not indirectly accessible "
"in dimension %d.", dim);
goto fail;
}
}
return 1;
fail:
return 0;
}
static int
__pyx_verify_contig(Py_buffer *buf, int ndim, int c_or_f_flag)
{
int i;
if (c_or_f_flag & __Pyx_IS_F_CONTIG) {
Py_ssize_t stride = 1;
for (i = 0; i < ndim; i++) {
if (stride * buf->itemsize != buf->strides[i] &&
buf->shape[i] > 1)
{
PyErr_SetString(PyExc_ValueError,
"Buffer not fortran contiguous.");
goto fail;
}
stride = stride * buf->shape[i];
}
} else if (c_or_f_flag & __Pyx_IS_C_CONTIG) {
Py_ssize_t stride = 1;
for (i = ndim - 1; i >= 0; i--) {
if (stride * buf->itemsize != buf->strides[i] &&
buf->shape[i] > 1) {
PyErr_SetString(PyExc_ValueError,
"Buffer not C contiguous.");
goto fail;
}
stride = stride * buf->shape[i];
}
}
return 1;
fail:
return 0;
}
static int __Pyx_ValidateAndInit_memviewslice(
int *axes_specs,
int c_or_f_flag,
int buf_flags,
int ndim,
__Pyx_TypeInfo *dtype,
__Pyx_BufFmt_StackElem stack[],
__Pyx_memviewslice *memviewslice,
PyObject *original_obj)
{
struct __pyx_memoryview_obj *memview, *new_memview;
__Pyx_RefNannyDeclarations
Py_buffer *buf;
int i, spec = 0, retval = -1;
__Pyx_BufFmt_Context ctx;
int from_memoryview = __pyx_memoryview_check(original_obj);
__Pyx_RefNannySetupContext("ValidateAndInit_memviewslice", 0);
if (from_memoryview && __pyx_typeinfo_cmp(dtype, ((struct __pyx_memoryview_obj *)
original_obj)->typeinfo)) {
memview = (struct __pyx_memoryview_obj *) original_obj;
new_memview = NULL;
} else {
memview = (struct __pyx_memoryview_obj *) __pyx_memoryview_new(
original_obj, buf_flags, 0, dtype);
new_memview = memview;
if (unlikely(!memview))
goto fail;
}
buf = &memview->view;
if (buf->ndim != ndim) {
PyErr_Format(PyExc_ValueError,
"Buffer has wrong number of dimensions (expected %d, got %d)",
ndim, buf->ndim);
goto fail;
}
if (new_memview) {
__Pyx_BufFmt_Init(&ctx, stack, dtype);
if (!__Pyx_BufFmt_CheckString(&ctx, buf->format)) goto fail;
}
if ((unsigned) buf->itemsize != dtype->size) {
PyErr_Format(PyExc_ValueError,
"Item size of buffer (%" CYTHON_FORMAT_SSIZE_T "u byte%s) "
"does not match size of '%s' (%" CYTHON_FORMAT_SSIZE_T "u byte%s)",
buf->itemsize,
(buf->itemsize > 1) ? "s" : "",
dtype->name,
dtype->size,
(dtype->size > 1) ? "s" : "");
goto fail;
}
for (i = 0; i < ndim; i++) {
spec = axes_specs[i];
if (!__pyx_check_strides(buf, i, ndim, spec))
goto fail;
if (!__pyx_check_suboffsets(buf, i, ndim, spec))
goto fail;
}
if (buf->strides && !__pyx_verify_contig(buf, ndim, c_or_f_flag))
goto fail;
if (unlikely(__Pyx_init_memviewslice(memview, ndim, memviewslice,
new_memview != NULL) == -1)) {
goto fail;
}
retval = 0;
goto no_fail;
fail:
Py_XDECREF(new_memview);
retval = -1;
no_fail:
__Pyx_RefNannyFinishContext();
return retval;
}
/* ObjectToMemviewSlice */
static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_d_dc_double(PyObject *obj, int writable_flag) {
__Pyx_memviewslice result = { 0, 0, { 0 }, { 0 }, { 0 } };
__Pyx_BufFmt_StackElem stack[1];
int axes_specs[] = { (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_FOLLOW), (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_CONTIG) };
int retcode;
if (obj == Py_None) {
result.memview = (struct __pyx_memoryview_obj *) Py_None;
return result;
}
retcode = __Pyx_ValidateAndInit_memviewslice(axes_specs, __Pyx_IS_C_CONTIG,
(PyBUF_C_CONTIGUOUS | PyBUF_FORMAT) | writable_flag, 2,
&__Pyx_TypeInfo_double, stack,
&result, obj);
if (unlikely(retcode == -1))
goto __pyx_fail;
return result;
__pyx_fail:
result.memview = NULL;
result.data = NULL;
return result;
}
/* ObjectToMemviewSlice */
static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_dc_double(PyObject *obj, int writable_flag) {
__Pyx_memviewslice result = { 0, 0, { 0 }, { 0 }, { 0 } };
__Pyx_BufFmt_StackElem stack[1];
int axes_specs[] = { (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_CONTIG) };
int retcode;
if (obj == Py_None) {
result.memview = (struct __pyx_memoryview_obj *) Py_None;
return result;
}
retcode = __Pyx_ValidateAndInit_memviewslice(axes_specs, __Pyx_IS_C_CONTIG,
(PyBUF_C_CONTIGUOUS | PyBUF_FORMAT) | writable_flag, 1,
&__Pyx_TypeInfo_double, stack,
&result, obj);
if (unlikely(retcode == -1))
goto __pyx_fail;
return result;
__pyx_fail:
result.memview = NULL;
result.data = NULL;
return result;
}
/* ObjectToMemviewSlice */
static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_dc_int(PyObject *obj, int writable_flag) {
__Pyx_memviewslice result = { 0, 0, { 0 }, { 0 }, { 0 } };
__Pyx_BufFmt_StackElem stack[1];
int axes_specs[] = { (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_CONTIG) };
int retcode;
if (obj == Py_None) {
result.memview = (struct __pyx_memoryview_obj *) Py_None;
return result;
}
retcode = __Pyx_ValidateAndInit_memviewslice(axes_specs, __Pyx_IS_C_CONTIG,
(PyBUF_C_CONTIGUOUS | PyBUF_FORMAT) | writable_flag, 1,
&__Pyx_TypeInfo_int, stack,
&result, obj);
if (unlikely(retcode == -1))
goto __pyx_fail;
return result;
__pyx_fail:
result.memview = NULL;
result.data = NULL;
return result;
}
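/* Note: the converter suffixes encode the per-axis layout specs used above:
"dc" is one direct, contiguous axis (a double[::1] / int[::1] view), while
"d_dc" prepends a direct "follow" axis, i.e. a C-contiguous two-dimensional
double[:, ::1] view. */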
/* CIntFromPyVerify */
#define __PYX_VERIFY_RETURN_INT(target_type, func_type, func_value)\
__PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 0)
#define __PYX_VERIFY_RETURN_INT_EXC(target_type, func_type, func_value)\
__PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 1)
#define __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, exc)\
{\
func_type value = func_value;\
if (sizeof(target_type) < sizeof(func_type)) {\
if (unlikely(value != (func_type) (target_type) value)) {\
func_type zero = 0;\
if (exc && unlikely(value == (func_type)-1 && PyErr_Occurred()))\
return (target_type) -1;\
if (is_unsigned && unlikely(value < zero))\
goto raise_neg_overflow;\
else\
goto raise_overflow;\
}\
}\
return (target_type) value;\
}
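/* Note: the macro above guards narrowing conversions: when target_type is
narrower than func_type it round-trips the value through a cast and, on a
mismatch, jumps to the raise_overflow / raise_neg_overflow labels every
caller defines; with exc set it first propagates a pending Python error
signalled by the conventional -1 return. */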
/* CIntToPy */
static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value) {
const int neg_one = (int) ((int) 0 - (int) 1), const_zero = (int) 0;
const int is_unsigned = neg_one > const_zero;
if (is_unsigned) {
if (sizeof(int) < sizeof(long)) {
return PyInt_FromLong((long) value);
} else if (sizeof(int) <= sizeof(unsigned long)) {
return PyLong_FromUnsignedLong((unsigned long) value);
#ifdef HAVE_LONG_LONG
} else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) {
return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);
#endif
}
} else {
if (sizeof(int) <= sizeof(long)) {
return PyInt_FromLong((long) value);
#ifdef HAVE_LONG_LONG
} else if (sizeof(int) <= sizeof(PY_LONG_LONG)) {
return PyLong_FromLongLong((PY_LONG_LONG) value);
#endif
}
}
{
int one = 1; int little = (int)*(unsigned char *)&one;
unsigned char *bytes = (unsigned char *)&value;
return _PyLong_FromByteArray(bytes, sizeof(int),
little, !is_unsigned);
}
}
/* MemviewSliceCopyTemplate */
static __Pyx_memviewslice
__pyx_memoryview_copy_new_contig(const __Pyx_memviewslice *from_mvs,
const char *mode, int ndim,
size_t sizeof_dtype, int contig_flag,
int dtype_is_object)
{
__Pyx_RefNannyDeclarations
int i;
__Pyx_memviewslice new_mvs = { 0, 0, { 0 }, { 0 }, { 0 } };
struct __pyx_memoryview_obj *from_memview = from_mvs->memview;
Py_buffer *buf = &from_memview->view;
PyObject *shape_tuple = NULL;
PyObject *temp_int = NULL;
struct __pyx_array_obj *array_obj = NULL;
struct __pyx_memoryview_obj *memview_obj = NULL;
__Pyx_RefNannySetupContext("__pyx_memoryview_copy_new_contig", 0);
for (i = 0; i < ndim; i++) {
if (from_mvs->suboffsets[i] >= 0) {
PyErr_Format(PyExc_ValueError, "Cannot copy memoryview slice with "
"indirect dimensions (axis %d)", i);
goto fail;
}
}
shape_tuple = PyTuple_New(ndim);
if (unlikely(!shape_tuple)) {
goto fail;
}
__Pyx_GOTREF(shape_tuple);
for(i = 0; i < ndim; i++) {
temp_int = PyInt_FromSsize_t(from_mvs->shape[i]);
if(unlikely(!temp_int)) {
goto fail;
} else {
PyTuple_SET_ITEM(shape_tuple, i, temp_int);
temp_int = NULL;
}
}
array_obj = __pyx_array_new(shape_tuple, sizeof_dtype, buf->format, (char *) mode, NULL);
if (unlikely(!array_obj)) {
goto fail;
}
__Pyx_GOTREF(array_obj);
memview_obj = (struct __pyx_memoryview_obj *) __pyx_memoryview_new(
(PyObject *) array_obj, contig_flag,
dtype_is_object,
from_mvs->memview->typeinfo);
if (unlikely(!memview_obj))
goto fail;
if (unlikely(__Pyx_init_memviewslice(memview_obj, ndim, &new_mvs, 1) < 0))
goto fail;
if (unlikely(__pyx_memoryview_copy_contents(*from_mvs, new_mvs, ndim, ndim,
dtype_is_object) < 0))
goto fail;
goto no_fail;
fail:
__Pyx_XDECREF(new_mvs.memview);
new_mvs.memview = NULL;
new_mvs.data = NULL;
no_fail:
__Pyx_XDECREF(shape_tuple);
__Pyx_XDECREF(temp_int);
__Pyx_XDECREF(array_obj);
__Pyx_RefNannyFinishContext();
return new_mvs;
}
/* CIntFromPy */
static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *x) {
const int neg_one = (int) ((int) 0 - (int) 1), const_zero = (int) 0;
const int is_unsigned = neg_one > const_zero;
#if PY_MAJOR_VERSION < 3
if (likely(PyInt_Check(x))) {
if (sizeof(int) < sizeof(long)) {
__PYX_VERIFY_RETURN_INT(int, long, PyInt_AS_LONG(x))
} else {
long val = PyInt_AS_LONG(x);
if (is_unsigned && unlikely(val < 0)) {
goto raise_neg_overflow;
}
return (int) val;
}
} else
#endif
if (likely(PyLong_Check(x))) {
if (is_unsigned) {
#if CYTHON_USE_PYLONG_INTERNALS
const digit* digits = ((PyLongObject*)x)->ob_digit;
switch (Py_SIZE(x)) {
case 0: return (int) 0;
case 1: __PYX_VERIFY_RETURN_INT(int, digit, digits[0])
case 2:
if (8 * sizeof(int) > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) >= 2 * PyLong_SHIFT) {
return (int) (((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]));
}
}
break;
case 3:
if (8 * sizeof(int) > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) >= 3 * PyLong_SHIFT) {
return (int) (((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]));
}
}
break;
case 4:
if (8 * sizeof(int) > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) >= 4 * PyLong_SHIFT) {
return (int) (((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]));
}
}
break;
}
#endif
#if CYTHON_COMPILING_IN_CPYTHON
if (unlikely(Py_SIZE(x) < 0)) {
goto raise_neg_overflow;
}
#else
{
int result = PyObject_RichCompareBool(x, Py_False, Py_LT);
if (unlikely(result < 0))
return (int) -1;
if (unlikely(result == 1))
goto raise_neg_overflow;
}
#endif
if (sizeof(int) <= sizeof(unsigned long)) {
__PYX_VERIFY_RETURN_INT_EXC(int, unsigned long, PyLong_AsUnsignedLong(x))
#ifdef HAVE_LONG_LONG
} else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) {
__PYX_VERIFY_RETURN_INT_EXC(int, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x))
#endif
}
} else {
#if CYTHON_USE_PYLONG_INTERNALS
const digit* digits = ((PyLongObject*)x)->ob_digit;
switch (Py_SIZE(x)) {
case 0: return (int) 0;
case -1: __PYX_VERIFY_RETURN_INT(int, sdigit, (sdigit) (-(sdigit)digits[0]))
case 1: __PYX_VERIFY_RETURN_INT(int, digit, +digits[0])
case -2:
if (8 * sizeof(int) - 1 > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) {
return (int) (((int)-1)*(((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
}
}
break;
case 2:
if (8 * sizeof(int) > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) {
return (int) ((((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
}
}
break;
case -3:
if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) {
return (int) (((int)-1)*(((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
}
}
break;
case 3:
if (8 * sizeof(int) > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) {
return (int) ((((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
}
}
break;
case -4:
if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) {
return (int) (((int)-1)*(((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
}
}
break;
case 4:
if (8 * sizeof(int) > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) {
return (int) ((((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
}
}
break;
}
#endif
if (sizeof(int) <= sizeof(long)) {
__PYX_VERIFY_RETURN_INT_EXC(int, long, PyLong_AsLong(x))
#ifdef HAVE_LONG_LONG
} else if (sizeof(int) <= sizeof(PY_LONG_LONG)) {
__PYX_VERIFY_RETURN_INT_EXC(int, PY_LONG_LONG, PyLong_AsLongLong(x))
#endif
}
}
{
#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray)
PyErr_SetString(PyExc_RuntimeError,
"_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers");
#else
int val;
PyObject *v = __Pyx_PyNumber_IntOrLong(x);
#if PY_MAJOR_VERSION < 3
if (likely(v) && !PyLong_Check(v)) {
PyObject *tmp = v;
v = PyNumber_Long(tmp);
Py_DECREF(tmp);
}
#endif
if (likely(v)) {
int one = 1; int is_little = (int)*(unsigned char *)&one;
unsigned char *bytes = (unsigned char *)&val;
int ret = _PyLong_AsByteArray((PyLongObject *)v,
bytes, sizeof(val),
is_little, !is_unsigned);
Py_DECREF(v);
if (likely(!ret))
return val;
}
#endif
return (int) -1;
}
} else {
int val;
PyObject *tmp = __Pyx_PyNumber_IntOrLong(x);
if (!tmp) return (int) -1;
val = __Pyx_PyInt_As_int(tmp);
Py_DECREF(tmp);
return val;
}
raise_overflow:
PyErr_SetString(PyExc_OverflowError,
"value too large to convert to int");
return (int) -1;
raise_neg_overflow:
PyErr_SetString(PyExc_OverflowError,
"can't convert negative value to int");
return (int) -1;
}
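/* Note: the fast paths above reassemble small PyLongs directly from their
15- or 30-bit ob_digit words, combining up to four digits with PyLong_SHIFT
shifts before falling back to PyLong_AsLong / PyLong_AsUnsignedLong and,
last, the byte-array conversion through _PyLong_AsByteArray. */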
/* CIntFromPy */
static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *x) {
const long neg_one = (long) ((long) 0 - (long) 1), const_zero = (long) 0;
const int is_unsigned = neg_one > const_zero;
#if PY_MAJOR_VERSION < 3
if (likely(PyInt_Check(x))) {
if (sizeof(long) < sizeof(long)) {
__PYX_VERIFY_RETURN_INT(long, long, PyInt_AS_LONG(x))
} else {
long val = PyInt_AS_LONG(x);
if (is_unsigned && unlikely(val < 0)) {
goto raise_neg_overflow;
}
return (long) val;
}
} else
#endif
if (likely(PyLong_Check(x))) {
if (is_unsigned) {
#if CYTHON_USE_PYLONG_INTERNALS
const digit* digits = ((PyLongObject*)x)->ob_digit;
switch (Py_SIZE(x)) {
case 0: return (long) 0;
case 1: __PYX_VERIFY_RETURN_INT(long, digit, digits[0])
case 2:
if (8 * sizeof(long) > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) >= 2 * PyLong_SHIFT) {
return (long) (((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]));
}
}
break;
case 3:
if (8 * sizeof(long) > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) >= 3 * PyLong_SHIFT) {
return (long) (((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]));
}
}
break;
case 4:
if (8 * sizeof(long) > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) >= 4 * PyLong_SHIFT) {
return (long) (((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]));
}
}
break;
}
#endif
#if CYTHON_COMPILING_IN_CPYTHON
if (unlikely(Py_SIZE(x) < 0)) {
goto raise_neg_overflow;
}
#else
{
int result = PyObject_RichCompareBool(x, Py_False, Py_LT);
if (unlikely(result < 0))
return (long) -1;
if (unlikely(result == 1))
goto raise_neg_overflow;
}
#endif
if (sizeof(long) <= sizeof(unsigned long)) {
__PYX_VERIFY_RETURN_INT_EXC(long, unsigned long, PyLong_AsUnsignedLong(x))
#ifdef HAVE_LONG_LONG
} else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) {
__PYX_VERIFY_RETURN_INT_EXC(long, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x))
#endif
}
} else {
#if CYTHON_USE_PYLONG_INTERNALS
const digit* digits = ((PyLongObject*)x)->ob_digit;
switch (Py_SIZE(x)) {
case 0: return (long) 0;
case -1: __PYX_VERIFY_RETURN_INT(long, sdigit, (sdigit) (-(sdigit)digits[0]))
case 1: __PYX_VERIFY_RETURN_INT(long, digit, +digits[0])
case -2:
if (8 * sizeof(long) - 1 > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {
return (long) (((long)-1)*(((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
}
}
break;
case 2:
if (8 * sizeof(long) > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {
return (long) ((((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
}
}
break;
case -3:
if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {
return (long) (((long)-1)*(((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
}
}
break;
case 3:
if (8 * sizeof(long) > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {
return (long) ((((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
}
}
break;
case -4:
if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) {
return (long) (((long)-1)*(((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
}
}
break;
case 4:
if (8 * sizeof(long) > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) {
return (long) ((((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
}
}
break;
}
#endif
if (sizeof(long) <= sizeof(long)) {
__PYX_VERIFY_RETURN_INT_EXC(long, long, PyLong_AsLong(x))
#ifdef HAVE_LONG_LONG
} else if (sizeof(long) <= sizeof(PY_LONG_LONG)) {
__PYX_VERIFY_RETURN_INT_EXC(long, PY_LONG_LONG, PyLong_AsLongLong(x))
#endif
}
}
{
#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray)
PyErr_SetString(PyExc_RuntimeError,
"_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers");
#else
long val;
PyObject *v = __Pyx_PyNumber_IntOrLong(x);
#if PY_MAJOR_VERSION < 3
if (likely(v) && !PyLong_Check(v)) {
PyObject *tmp = v;
v = PyNumber_Long(tmp);
Py_DECREF(tmp);
}
#endif
if (likely(v)) {
int one = 1; int is_little = (int)*(unsigned char *)&one;
unsigned char *bytes = (unsigned char *)&val;
int ret = _PyLong_AsByteArray((PyLongObject *)v,
bytes, sizeof(val),
is_little, !is_unsigned);
Py_DECREF(v);
if (likely(!ret))
return val;
}
#endif
return (long) -1;
}
} else {
long val;
PyObject *tmp = __Pyx_PyNumber_IntOrLong(x);
if (!tmp) return (long) -1;
val = __Pyx_PyInt_As_long(tmp);
Py_DECREF(tmp);
return val;
}
raise_overflow:
PyErr_SetString(PyExc_OverflowError,
"value too large to convert to long");
return (long) -1;
raise_neg_overflow:
PyErr_SetString(PyExc_OverflowError,
"can't convert negative value to long");
return (long) -1;
}
/* CIntToPy */
static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value) {
const long neg_one = (long) ((long) 0 - (long) 1), const_zero = (long) 0;
const int is_unsigned = neg_one > const_zero;
if (is_unsigned) {
if (sizeof(long) < sizeof(long)) {
return PyInt_FromLong((long) value);
} else if (sizeof(long) <= sizeof(unsigned long)) {
return PyLong_FromUnsignedLong((unsigned long) value);
#ifdef HAVE_LONG_LONG
} else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) {
return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);
#endif
}
} else {
if (sizeof(long) <= sizeof(long)) {
return PyInt_FromLong((long) value);
#ifdef HAVE_LONG_LONG
} else if (sizeof(long) <= sizeof(PY_LONG_LONG)) {
return PyLong_FromLongLong((PY_LONG_LONG) value);
#endif
}
}
{
int one = 1; int little = (int)*(unsigned char *)&one;
unsigned char *bytes = (unsigned char *)&value;
return _PyLong_FromByteArray(bytes, sizeof(long),
little, !is_unsigned);
}
}
/* CIntFromPy */
static CYTHON_INLINE char __Pyx_PyInt_As_char(PyObject *x) {
const char neg_one = (char) ((char) 0 - (char) 1), const_zero = (char) 0;
const int is_unsigned = neg_one > const_zero;
#if PY_MAJOR_VERSION < 3
if (likely(PyInt_Check(x))) {
if (sizeof(char) < sizeof(long)) {
__PYX_VERIFY_RETURN_INT(char, long, PyInt_AS_LONG(x))
} else {
long val = PyInt_AS_LONG(x);
if (is_unsigned && unlikely(val < 0)) {
goto raise_neg_overflow;
}
return (char) val;
}
} else
#endif
if (likely(PyLong_Check(x))) {
if (is_unsigned) {
#if CYTHON_USE_PYLONG_INTERNALS
const digit* digits = ((PyLongObject*)x)->ob_digit;
switch (Py_SIZE(x)) {
case 0: return (char) 0;
case 1: __PYX_VERIFY_RETURN_INT(char, digit, digits[0])
case 2:
if (8 * sizeof(char) > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(char, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(char) >= 2 * PyLong_SHIFT) {
return (char) (((((char)digits[1]) << PyLong_SHIFT) | (char)digits[0]));
}
}
break;
case 3:
if (8 * sizeof(char) > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(char) >= 3 * PyLong_SHIFT) {
return (char) (((((((char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0]));
}
}
break;
case 4:
if (8 * sizeof(char) > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(char) >= 4 * PyLong_SHIFT) {
return (char) (((((((((char)digits[3]) << PyLong_SHIFT) | (char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0]));
}
}
break;
}
#endif
#if CYTHON_COMPILING_IN_CPYTHON
if (unlikely(Py_SIZE(x) < 0)) {
goto raise_neg_overflow;
}
#else
{
int result = PyObject_RichCompareBool(x, Py_False, Py_LT);
if (unlikely(result < 0))
return (char) -1;
if (unlikely(result == 1))
goto raise_neg_overflow;
}
#endif
if (sizeof(char) <= sizeof(unsigned long)) {
__PYX_VERIFY_RETURN_INT_EXC(char, unsigned long, PyLong_AsUnsignedLong(x))
#ifdef HAVE_LONG_LONG
} else if (sizeof(char) <= sizeof(unsigned PY_LONG_LONG)) {
__PYX_VERIFY_RETURN_INT_EXC(char, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x))
#endif
}
} else {
#if CYTHON_USE_PYLONG_INTERNALS
const digit* digits = ((PyLongObject*)x)->ob_digit;
switch (Py_SIZE(x)) {
case 0: return (char) 0;
case -1: __PYX_VERIFY_RETURN_INT(char, sdigit, (sdigit) (-(sdigit)digits[0]))
case 1: __PYX_VERIFY_RETURN_INT(char, digit, +digits[0])
case -2:
if (8 * sizeof(char) - 1 > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(char, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(char) - 1 > 2 * PyLong_SHIFT) {
return (char) (((char)-1)*(((((char)digits[1]) << PyLong_SHIFT) | (char)digits[0])));
}
}
break;
case 2:
if (8 * sizeof(char) > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(char, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(char) - 1 > 2 * PyLong_SHIFT) {
return (char) ((((((char)digits[1]) << PyLong_SHIFT) | (char)digits[0])));
}
}
break;
case -3:
if (8 * sizeof(char) - 1 > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(char, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(char) - 1 > 3 * PyLong_SHIFT) {
return (char) (((char)-1)*(((((((char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0])));
}
}
break;
case 3:
if (8 * sizeof(char) > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(char) - 1 > 3 * PyLong_SHIFT) {
return (char) ((((((((char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0])));
}
}
break;
case -4:
if (8 * sizeof(char) - 1 > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(char, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(char) - 1 > 4 * PyLong_SHIFT) {
return (char) (((char)-1)*(((((((((char)digits[3]) << PyLong_SHIFT) | (char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0])));
}
}
break;
case 4:
if (8 * sizeof(char) > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(char) - 1 > 4 * PyLong_SHIFT) {
return (char) ((((((((((char)digits[3]) << PyLong_SHIFT) | (char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0])));
}
}
break;
}
#endif
if (sizeof(char) <= sizeof(long)) {
__PYX_VERIFY_RETURN_INT_EXC(char, long, PyLong_AsLong(x))
#ifdef HAVE_LONG_LONG
} else if (sizeof(char) <= sizeof(PY_LONG_LONG)) {
__PYX_VERIFY_RETURN_INT_EXC(char, PY_LONG_LONG, PyLong_AsLongLong(x))
#endif
}
}
{
#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray)
PyErr_SetString(PyExc_RuntimeError,
"_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers");
#else
char val;
PyObject *v = __Pyx_PyNumber_IntOrLong(x);
#if PY_MAJOR_VERSION < 3
if (likely(v) && !PyLong_Check(v)) {
PyObject *tmp = v;
v = PyNumber_Long(tmp);
Py_DECREF(tmp);
}
#endif
if (likely(v)) {
int one = 1; int is_little = (int)*(unsigned char *)&one;
unsigned char *bytes = (unsigned char *)&val;
int ret = _PyLong_AsByteArray((PyLongObject *)v,
bytes, sizeof(val),
is_little, !is_unsigned);
Py_DECREF(v);
if (likely(!ret))
return val;
}
#endif
return (char) -1;
}
} else {
char val;
PyObject *tmp = __Pyx_PyNumber_IntOrLong(x);
if (!tmp) return (char) -1;
val = __Pyx_PyInt_As_char(tmp);
Py_DECREF(tmp);
return val;
}
raise_overflow:
PyErr_SetString(PyExc_OverflowError,
"value too large to convert to char");
return (char) -1;
raise_neg_overflow:
PyErr_SetString(PyExc_OverflowError,
"can't convert negative value to char");
return (char) -1;
}
/* CheckBinaryVersion */
static int __Pyx_check_binary_version(void) {
char ctversion[4], rtversion[4];
PyOS_snprintf(ctversion, 4, "%d.%d", PY_MAJOR_VERSION, PY_MINOR_VERSION);
PyOS_snprintf(rtversion, 4, "%s", Py_GetVersion());
if (ctversion[0] != rtversion[0] || ctversion[2] != rtversion[2]) {
char message[200];
PyOS_snprintf(message, sizeof(message),
"compiletime version %s of module '%.100s' "
"does not match runtime version %s",
ctversion, __Pyx_MODULE_NAME, rtversion);
return PyErr_WarnEx(NULL, message, 1);
}
return 0;
}
/* InitStrings */
static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) {
while (t->p) {
#if PY_MAJOR_VERSION < 3
if (t->is_unicode) {
*t->p = PyUnicode_DecodeUTF8(t->s, t->n - 1, NULL);
} else if (t->intern) {
*t->p = PyString_InternFromString(t->s);
} else {
*t->p = PyString_FromStringAndSize(t->s, t->n - 1);
}
#else
if (t->is_unicode | t->is_str) {
if (t->intern) {
*t->p = PyUnicode_InternFromString(t->s);
} else if (t->encoding) {
*t->p = PyUnicode_Decode(t->s, t->n - 1, t->encoding, NULL);
} else {
*t->p = PyUnicode_FromStringAndSize(t->s, t->n - 1);
}
} else {
*t->p = PyBytes_FromStringAndSize(t->s, t->n - 1);
}
#endif
if (!*t->p)
return -1;
if (PyObject_Hash(*t->p) == -1)
return -1;
++t;
}
return 0;
}
static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char* c_str) {
return __Pyx_PyUnicode_FromStringAndSize(c_str, (Py_ssize_t)strlen(c_str));
}
static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject* o) {
Py_ssize_t ignore;
return __Pyx_PyObject_AsStringAndSize(o, &ignore);
}
#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT
#if !CYTHON_PEP393_ENABLED
static const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) {
char* defenc_c;
PyObject* defenc = _PyUnicode_AsDefaultEncodedString(o, NULL);
if (!defenc) return NULL;
defenc_c = PyBytes_AS_STRING(defenc);
#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
{
char* end = defenc_c + PyBytes_GET_SIZE(defenc);
char* c;
for (c = defenc_c; c < end; c++) {
if ((unsigned char) (*c) >= 128) {
PyUnicode_AsASCIIString(o);
return NULL;
}
}
}
#endif
*length = PyBytes_GET_SIZE(defenc);
return defenc_c;
}
#else
static CYTHON_INLINE const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) {
if (unlikely(__Pyx_PyUnicode_READY(o) == -1)) return NULL;
#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
if (likely(PyUnicode_IS_ASCII(o))) {
*length = PyUnicode_GET_LENGTH(o);
return PyUnicode_AsUTF8(o);
} else {
PyUnicode_AsASCIIString(o);
return NULL;
}
#else
return PyUnicode_AsUTF8AndSize(o, length);
#endif
}
#endif
#endif
static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject* o, Py_ssize_t *length) {
#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT
if (
#if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
__Pyx_sys_getdefaultencoding_not_ascii &&
#endif
PyUnicode_Check(o)) {
return __Pyx_PyUnicode_AsStringAndSize(o, length);
} else
#endif
#if (!CYTHON_COMPILING_IN_PYPY) || (defined(PyByteArray_AS_STRING) && defined(PyByteArray_GET_SIZE))
if (PyByteArray_Check(o)) {
*length = PyByteArray_GET_SIZE(o);
return PyByteArray_AS_STRING(o);
} else
#endif
{
char* result;
int r = PyBytes_AsStringAndSize(o, &result, length);
if (unlikely(r < 0)) {
return NULL;
} else {
return result;
}
}
}
static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject* x) {
int is_true = x == Py_True;
if (is_true | (x == Py_False) | (x == Py_None)) return is_true;
else return PyObject_IsTrue(x);
}
static CYTHON_INLINE int __Pyx_PyObject_IsTrueAndDecref(PyObject* x) {
int retval;
if (unlikely(!x)) return -1;
retval = __Pyx_PyObject_IsTrue(x);
Py_DECREF(x);
return retval;
}
static PyObject* __Pyx_PyNumber_IntOrLongWrongResultType(PyObject* result, const char* type_name) {
#if PY_MAJOR_VERSION >= 3
if (PyLong_Check(result)) {
if (PyErr_WarnFormat(PyExc_DeprecationWarning, 1,
"__int__ returned non-int (type %.200s). "
"The ability to return an instance of a strict subclass of int "
"is deprecated, and may be removed in a future version of Python.",
Py_TYPE(result)->tp_name)) {
Py_DECREF(result);
return NULL;
}
return result;
}
#endif
PyErr_Format(PyExc_TypeError,
"__%.4s__ returned non-%.4s (type %.200s)",
type_name, type_name, Py_TYPE(result)->tp_name);
Py_DECREF(result);
return NULL;
}
static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x) {
#if CYTHON_USE_TYPE_SLOTS
PyNumberMethods *m;
#endif
const char *name = NULL;
PyObject *res = NULL;
#if PY_MAJOR_VERSION < 3
if (likely(PyInt_Check(x) || PyLong_Check(x)))
#else
if (likely(PyLong_Check(x)))
#endif
return __Pyx_NewRef(x);
#if CYTHON_USE_TYPE_SLOTS
m = Py_TYPE(x)->tp_as_number;
#if PY_MAJOR_VERSION < 3
if (m && m->nb_int) {
name = "int";
res = m->nb_int(x);
}
else if (m && m->nb_long) {
name = "long";
res = m->nb_long(x);
}
#else
if (likely(m && m->nb_int)) {
name = "int";
res = m->nb_int(x);
}
#endif
#else
if (!PyBytes_CheckExact(x) && !PyUnicode_CheckExact(x)) {
res = PyNumber_Int(x);
}
#endif
if (likely(res)) {
#if PY_MAJOR_VERSION < 3
if (unlikely(!PyInt_Check(res) && !PyLong_Check(res))) {
#else
if (unlikely(!PyLong_CheckExact(res))) {
#endif
return __Pyx_PyNumber_IntOrLongWrongResultType(res, name);
}
}
else if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_TypeError,
"an integer is required");
}
return res;
}
static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) {
Py_ssize_t ival;
PyObject *x;
#if PY_MAJOR_VERSION < 3
if (likely(PyInt_CheckExact(b))) {
if (sizeof(Py_ssize_t) >= sizeof(long))
return PyInt_AS_LONG(b);
else
return PyInt_AsSsize_t(b);
}
#endif
if (likely(PyLong_CheckExact(b))) {
#if CYTHON_USE_PYLONG_INTERNALS
const digit* digits = ((PyLongObject*)b)->ob_digit;
const Py_ssize_t size = Py_SIZE(b);
if (likely(__Pyx_sst_abs(size) <= 1)) {
ival = likely(size) ? digits[0] : 0;
if (size == -1) ival = -ival;
return ival;
} else {
switch (size) {
case 2:
if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) {
return (Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
}
break;
case -2:
if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) {
return -(Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
}
break;
case 3:
if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) {
return (Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
}
break;
case -3:
if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) {
return -(Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
}
break;
case 4:
if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) {
return (Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
}
break;
case -4:
if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) {
return -(Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
}
break;
}
}
#endif
return PyLong_AsSsize_t(b);
}
x = PyNumber_Index(b);
if (!x) return -1;
ival = PyInt_AsSsize_t(x);
Py_DECREF(x);
return ival;
}
static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b) {
return b ? __Pyx_NewRef(Py_True) : __Pyx_NewRef(Py_False);
}
static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t ival) {
return PyInt_FromSize_t(ival);
}
#endif /* Py_PYTHON_H */
|
kronecker.h | /* Copyright (C) 2009-2010 The Trustees of Indiana University. */
/* */
/* Use, modification and distribution is subject to the Boost Software */
/* License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at */
/* http://www.boost.org/LICENSE_1_0.txt) */
/* */
/* Authors: Jeremiah Willcock */
/* Andrew Lumsdaine */
#ifndef KRONECKER_H
#define KRONECKER_H
#include "user_settings.h"
#include <stdlib.h>
#include <stdint.h>
#include <stdio.h>
#include <assert.h>
#include "definitions.h"
#include "generator_config.h"
#include "generator_io.h"
#include "hash.hpp"
#include "utils.h"
#include "user_settings.h"
#include "splittable_mrg.h"
#ifndef __STDC_FORMAT_MACROS
#define __STDC_FORMAT_MACROS
#endif
#include <inttypes.h>
/* Initiator settings: for faster random number generation, the initiator
* probabilities are defined as fractions (a = INITIATOR_A_NUMERATOR /
* INITIATOR_DENOMINATOR, b = c = INITIATOR_BC_NUMERATOR /
 * INITIATOR_DENOMINATOR, d = 1 - a - b - c). */
#define INITIATOR_A_NUMERATOR 2500
#define INITIATOR_BC_NUMERATOR 2500
#define INITIATOR_DENOMINATOR 10000
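/* With these defaults a = b = c = 2500/10000 = 0.25, and hence
 * d = 1 - a - b - c = 0.25, i.e. all four quadrants are equally likely. */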
/* If this macro is defined to a non-zero value, use SPK_NOISE_LEVEL /
* INITIATOR_DENOMINATOR as the noise parameter to use in introducing noise
* into the graph parameters. The approach used is from "A Hitchhiker's Guide
* to Choosing Parameters of Stochastic Kronecker Graphs" by C. Seshadhri, Ali
* Pinar, and Tamara G. Kolda (http://arxiv.org/abs/1102.5046v1), except that
* the adjustment here is chosen based on the current level being processed
* rather than being chosen randomly. */
#define SPK_NOISE_LEVEL 0
/* #define SPK_NOISE_LEVEL 1000 -- in INITIATOR_DENOMINATOR units */
namespace kagen {
#ifdef GENERATOR_USE_PACKED_EDGE_TYPE
typedef struct packed_edge {
uint32_t v0_low;
uint32_t v1_low;
uint32_t high; /* v1 in high half, v0 in low half */
} packed_edge;
static inline int64_t get_v0_from_edge(const packed_edge* p) {
return (p->v0_low | ((int64_t)((int16_t)(p->high & 0xFFFF)) << 32));
}
static inline int64_t get_v1_from_edge(const packed_edge* p) {
return (p->v1_low | ((int64_t)((int16_t)(p->high >> 16)) << 32));
}
static inline void write_edge(packed_edge* p, int64_t v0, int64_t v1) {
p->v0_low = (uint32_t)v0;
p->v1_low = (uint32_t)v1;
p->high = (uint32_t)(((v0 >> 32) & 0xFFFF) | (((v1 >> 32) & 0xFFFF) << 16));
}
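/* Consequence of the packed layout: vertex IDs are limited to 48 bits; the
 * low 32 bits of each endpoint live in v0_low/v1_low, and the remaining
 * (sign-extended) 16 bits of both endpoints share the 'high' word. */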
#else
typedef struct packed_edge {
int64_t v0;
int64_t v1;
} packed_edge;
static inline int64_t get_v0_from_edge(const packed_edge* p) {
return p->v0;
}
static inline int64_t get_v1_from_edge(const packed_edge* p) {
return p->v1;
}
static inline void write_edge(packed_edge* p, int64_t v0, int64_t v1) {
p->v0 = v0;
p->v1 = v1;
}
#endif
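/* Usage sketch (illustrative helper, not part of the original API): both the
 * packed and the plain variant expose the same accessors, so an edge can be
 * round-tripped like this. */
static inline int packed_edge_roundtrip_ok(void) {
  packed_edge e;
  write_edge(&e, (int64_t)123456789, (int64_t)42);
  return get_v0_from_edge(&e) == (int64_t)123456789 &&
         get_v1_from_edge(&e) == (int64_t)42;
}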
template <typename EdgeCallback>
class Kronecker {
public:
Kronecker(PGeneratorConfig &config, const PEID rank,
const EdgeCallback &cb)
: config_(config),
rank_(rank),
io_(config),
cb_(cb) {
MPI_Comm_size(MPI_COMM_WORLD, &size_);
// Init variables
from_ = 0;
to_ = config_.n - 1;
log_n_ = log2(config_.n);
edges_per_pe_ = config_.m / config_.k;
SInt remaining_edges = config_.m % config_.k;
num_edges_ = edges_per_pe_ + ((SInt)rank < remaining_edges);
}
void Generate() {
uint_fast32_t seed[5];
make_mrg_seed(sampling::Spooky::hash((config_.seed + 1) * size_), sampling::Spooky::hash(rank_), seed);
mrg_state state;
mrg_seed(&state, seed);
/* Values for scrambling: fill in the scramble1_/scramble2_ members here.
 * (A local declaration of the same names would shadow the members that
 * Scramble() reads, leaving them uninitialized.) */
{
mrg_state new_state = state;
mrg_skip(&new_state, 50, 7, 0);
scramble1_ = mrg_get_uint_orig(&new_state);
scramble1_ *= UINT64_C(0xFFFFFFFF);
scramble1_ += mrg_get_uint_orig(&new_state);
scramble2_ = mrg_get_uint_orig(&new_state);
scramble2_ *= UINT64_C(0xFFFFFFFF);
scramble2_ += mrg_get_uint_orig(&new_state);
}
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (SInt i = 0; i < num_edges_; ++i) {
mrg_state new_state = state;
mrg_skip(&new_state, 0, (uint64_t)i, 0);
GenerateEdge(config_.n, 0, &new_state);
}
}
void Output() {
#ifdef OUTPUT_EDGES
io_.OutputEdges();
#else
io_.OutputDist();
#endif
}
std::pair<SInt, SInt> GetVertexRange() {
return std::make_pair(from_, to_);
}
SInt NumberOfEdges() const { return io_.NumEdges(); }
private:
// Config
PGeneratorConfig &config_;
PEID size_, rank_;
// I/O
GeneratorIO<> io_;
EdgeCallback cb_;
// Constants and variables
int log_n_;
SInt from_, to_;
SInt num_edges_;
int64_t scramble1_, scramble2_;
SInt edges_per_pe_;
int Bernoulli(mrg_state* st, int level, int nlevels) {
#if SPK_NOISE_LEVEL == 0
/* Avoid warnings */
(void)level;
(void)nlevels;
#endif
/* Generate a pseudorandom number in the range [0, INITIATOR_DENOMINATOR)
* without modulo bias. */
static const uint32_t limit = (UINT32_C(0x7FFFFFFF) % INITIATOR_DENOMINATOR);
uint32_t val = mrg_get_uint_orig(st);
if (/* Unlikely */ val < limit) {
do {
val = mrg_get_uint_orig(st);
} while (val < limit);
}
#if SPK_NOISE_LEVEL == 0
int spk_noise_factor = 0;
#else
int spk_noise_factor = 2 * SPK_NOISE_LEVEL * level / nlevels - SPK_NOISE_LEVEL;
#endif
unsigned int adjusted_bc_numerator = (unsigned int)(INITIATOR_BC_NUMERATOR + spk_noise_factor);
val %= INITIATOR_DENOMINATOR;
if (val < adjusted_bc_numerator) return 1;
val = (uint32_t)(val - adjusted_bc_numerator);
if (val < adjusted_bc_numerator) return 2;
val = (uint32_t)(val - adjusted_bc_numerator);
#if SPK_NOISE_LEVEL == 0
if (val < INITIATOR_A_NUMERATOR) return 0;
#else
if (val < INITIATOR_A_NUMERATOR * (INITIATOR_DENOMINATOR - 2 * INITIATOR_BC_NUMERATOR) / (INITIATOR_DENOMINATOR - 2 * adjusted_bc_numerator)) return 0;
#endif
return 3;
}
/* Reverse bits in a number; this should be optimized for performance
* (including using bit- or byte-reverse intrinsics if your platform has them).
*/
static inline uint64_t bitreverse(uint64_t x) {
#if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3)
#define USE_GCC_BYTESWAP /* __builtin_bswap* are in 4.3 but not 4.2 */
#endif
#ifdef FAST_64BIT_ARITHMETIC
/* 64-bit code */
#ifdef USE_GCC_BYTESWAP
x = __builtin_bswap64(x);
#else
x = (x >> 32) | (x << 32);
x = ((x >> 16) & UINT64_C(0x0000FFFF0000FFFF)) | ((x & UINT64_C(0x0000FFFF0000FFFF)) << 16);
x = ((x >> 8) & UINT64_C(0x00FF00FF00FF00FF)) | ((x & UINT64_C(0x00FF00FF00FF00FF)) << 8);
#endif
x = ((x >> 4) & UINT64_C(0x0F0F0F0F0F0F0F0F)) | ((x & UINT64_C(0x0F0F0F0F0F0F0F0F)) << 4);
x = ((x >> 2) & UINT64_C(0x3333333333333333)) | ((x & UINT64_C(0x3333333333333333)) << 2);
x = ((x >> 1) & UINT64_C(0x5555555555555555)) | ((x & UINT64_C(0x5555555555555555)) << 1);
return x;
#else
/* 32-bit code */
uint32_t h = (uint32_t)(x >> 32);
uint32_t l = (uint32_t)(x & UINT32_MAX);
#ifdef USE_GCC_BYTESWAP
h = __builtin_bswap32(h);
l = __builtin_bswap32(l);
#else
h = (h >> 16) | (h << 16);
l = (l >> 16) | (l << 16);
h = ((h >> 8) & UINT32_C(0x00FF00FF)) | ((h & UINT32_C(0x00FF00FF)) << 8);
l = ((l >> 8) & UINT32_C(0x00FF00FF)) | ((l & UINT32_C(0x00FF00FF)) << 8);
#endif
h = ((h >> 4) & UINT32_C(0x0F0F0F0F)) | ((h & UINT32_C(0x0F0F0F0F)) << 4);
l = ((l >> 4) & UINT32_C(0x0F0F0F0F)) | ((l & UINT32_C(0x0F0F0F0F)) << 4);
h = ((h >> 2) & UINT32_C(0x33333333)) | ((h & UINT32_C(0x33333333)) << 2);
l = ((l >> 2) & UINT32_C(0x33333333)) | ((l & UINT32_C(0x33333333)) << 2);
h = ((h >> 1) & UINT32_C(0x55555555)) | ((h & UINT32_C(0x55555555)) << 1);
l = ((l >> 1) & UINT32_C(0x55555555)) | ((l & UINT32_C(0x55555555)) << 1);
return ((uint64_t)l << 32) | h; /* Swap halves */
#endif
}
/* Apply a permutation to scramble vertex numbers; a randomly generated
* permutation is not used because applying it at scale is too expensive. */
inline int64_t Scramble(int64_t v0) {
uint64_t v = (uint64_t)v0;
v += scramble1_ + scramble2_;
v *= (scramble1_ | UINT64_C(0x4519840211493211));
v = (bitreverse(v) >> (64 - log_n_));
assert ((v >> log_n_) == 0);
v *= (scramble2_ | UINT64_C(0x3050852102C843A5));
v = (bitreverse(v) >> (64 - log_n_));
assert ((v >> log_n_) == 0);
return (int64_t)v;
}
/* Make a single graph edge using a pre-set MRG state. */
void GenerateEdge(int64_t n, int level, mrg_state* st) {
int64_t base_src = 0, base_tgt = 0;
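/* Walk down the recursive quadrant structure of the 2^log_n x 2^log_n
 * adjacency matrix: at each level Bernoulli() picks one of the four
 * quadrants according to the initiator probabilities; square/2 selects
 * the row half and square%2 the column half. */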
while (n > 1) {
int square = Bernoulli(st, level, log_n_);
int src_offset = square / 2;
int tgt_offset = square % 2;
assert (base_src <= base_tgt);
if (base_src == base_tgt) {
/* Clip-and-flip for undirected graph */
if (src_offset > tgt_offset) {
int temp = src_offset;
src_offset = tgt_offset;
tgt_offset = temp;
}
}
n /= 2;
++level;
base_src += n * src_offset;
base_tgt += n * tgt_offset;
}
#ifdef OUTPUT_EDGES
io_.PushEdge(Scramble(base_src), Scramble(base_tgt));
#else
io_.UpdateDist(Scramble(base_src));
io_.UpdateDist(Scramble(base_tgt));
#endif
}
};
}
#endif /* KRONECKER_H */
|
GB_binop__bset_uint32.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__bset_uint32)
// A.*B function (eWiseMult): GB (_AemultB_08__bset_uint32)
// A.*B function (eWiseMult): GB (_AemultB_02__bset_uint32)
// A.*B function (eWiseMult): GB (_AemultB_04__bset_uint32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__bset_uint32)
// A*D function (colscale): GB ((none))
// D*A function (rowscale): GB ((none))
// C+=B function (dense accum): GB (_Cdense_accumB__bset_uint32)
// C+=b function (dense accum): GB (_Cdense_accumb__bset_uint32)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__bset_uint32)
// C=scalar+B GB (_bind1st__bset_uint32)
// C=scalar+B' GB (_bind1st_tran__bset_uint32)
// C=A+scalar GB (_bind2nd__bset_uint32)
// C=A'+scalar GB (_bind2nd_tran__bset_uint32)
// C type: uint32_t
// A type: uint32_t
// A pattern? 0
// B type: uint32_t
// B pattern? 0
// BinaryOp: cij = GB_BITSET (aij, bij, uint32_t, 32)
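// (BSET semantics: z = x with bit y set, roughly x | (1u << y) with y
// interpreted modulo the 32-bit word size; the exact GB_BITSET macro is
// defined in the library headers.)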
#define GB_ATYPE \
uint32_t
#define GB_BTYPE \
uint32_t
#define GB_CTYPE \
uint32_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint32_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint32_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint32_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = GB_BITSET (x, y, uint32_t, 32) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
1
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_BSET || GxB_NO_UINT32 || GxB_NO_BSET_UINT32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
void GB (_Cdense_ewise3_noaccum__bset_uint32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__bset_uint32)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__bset_uint32)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint32_t
uint32_t bwork = (*((uint32_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t *restrict Cx = (uint32_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t *restrict Cx = (uint32_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__bset_uint32)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
uint32_t alpha_scalar ;
uint32_t beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((uint32_t *) alpha_scalar_in)) ;
beta_scalar = (*((uint32_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__bset_uint32)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__bset_uint32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__bset_uint32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__bset_uint32)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__bset_uint32)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t *Cx = (uint32_t *) Cx_output ;
uint32_t x = (*((uint32_t *) x_input)) ;
uint32_t *Bx = (uint32_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
uint32_t bij = GBX (Bx, p, false) ;
Cx [p] = GB_BITSET (x, bij, uint32_t, 32) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__bset_uint32)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
uint32_t *Cx = (uint32_t *) Cx_output ;
uint32_t *Ax = (uint32_t *) Ax_input ;
uint32_t y = (*((uint32_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
uint32_t aij = GBX (Ax, p, false) ;
Cx [p] = GB_BITSET (aij, y, uint32_t, 32) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_BITSET (x, aij, uint32_t, 32) ; \
}
GrB_Info GB (_bind1st_tran__bset_uint32)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint32_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t x = (*((const uint32_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
uint32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_BITSET (aij, y, uint32_t, 32) ; \
}
GrB_Info GB (_bind2nd_tran__bset_uint32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t y = (*((const uint32_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
single_objective_ga.h | #if defined(__posix) || defined(__unix) || defined(__linux) || defined(__APPLE__)
// #pragma GCC diagnostic ignored "-Wreorder"
// #pragma GCC diagnostic ignored "-Wunused-variable"
#pragma GCC diagnostic ignored "-Wformat="
#pragma GCC diagnostic ignored "-Wsign-compare"
#endif
#ifndef __SINGLE_OBJECTIVE_GA_H__
#define __SINGLE_OBJECTIVE_GA_H__
#include <utility>
#include <numeric>
#include "base_ga.h"
namespace algorithms
{
template<class Chromosome, class FitnessFunction>
class SingleObjectiveGA : public BaseGA<Chromosome, FitnessFunction>
{
using BaseGA<Chromosome, FitnessFunction>::BaseGA;
using BaseGA<Chromosome, FitnessFunction>::Select;
using BaseGA<Chromosome, FitnessFunction>::Reproduce;
using BaseGA<Chromosome, FitnessFunction>::fitness_function;
using BaseGA<Chromosome, FitnessFunction>::indices;
using BaseGA<Chromosome, FitnessFunction>::parents;
using BaseGA<Chromosome, FitnessFunction>::offspring;
typedef typename BaseGA<Chromosome, FitnessFunction>::Population Population;
// Replaces the parents with the best individuals from the combined
// parent and offspring populations.
void Replace()
{
auto on_objective_and_constraints = [](const Chromosome &p, const Chromosome &q)
{
// If either p or q is infeasible
if (p.constraints != utils::Approx(q.constraints)) {
return p.constraints < q.constraints;
}
return p.objective < q.objective;
};
std::sort(offspring.begin(), offspring.end(), on_objective_and_constraints);
Population combo(parents.size() + offspring.size());
std::merge(
std::make_move_iterator(parents.begin()),
std::make_move_iterator(parents.end()),
std::make_move_iterator(offspring.begin()),
std::make_move_iterator(offspring.end()),
combo.begin(),
on_objective_and_constraints
);
parents = Population(
std::make_move_iterator(combo.begin()),
std::make_move_iterator(combo.begin() + parents.size())
);
}
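// Design note: since the parents stay sorted and the offspring are sorted
// above, the merge implements a (mu+lambda)-style elitist replacement in
// which only the best parents.size() individuals of both populations survive.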
inline bool Tournament(const Chromosome &p, const Chromosome &q) override
{
// If either p or q is infeasible
if (p.constraints != utils::Approx(q.constraints)) {
return p.constraints < q.constraints;
}
if (p.objective < q.objective) {
return true;
}
else if (p.objective > q.objective) {
return false;
}
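// Objectives are equal: break the tie uniformly at random to avoid
// positional bias between the two candidates.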
return utils::random() < 0.5;
}
public:
// Creates new parent population.
template<class... ChromosomeParams>
void Init(
int popsize,
ChromosomeParams... params
)
{
indices.resize(popsize);
std::iota(indices.begin(), indices.end(), 0);
parents.reserve(popsize);
offspring.reserve(popsize);
parents.resize(0);
while (popsize-- > 0) {
parents.push_back(std::move(Chromosome(params...)));
}
#pragma omp parallel for
for (int i = 0; i < parents.size(); ++i) {
fitness_function(parents[i]);
}
// Sorts in ascending order of constraint violation
// and then ascending order of objective value
std::sort(parents.begin(), parents.end(),
[](const Chromosome &p, const Chromosome &q)
{
// If either p or q is infeasible
if (p.constraints != utils::Approx(q.constraints)) {
return p.constraints < q.constraints;
}
return p.objective < q.objective;
}
);
}
void Update()
{
Select();
Reproduce();
#pragma omp parallel for
for (int i = 0; i < offspring.size(); ++i) {
fitness_function(offspring[i]);
}
Replace();
}
// Returns top parent individual.
Chromosome Top()
{
return parents[0];
}
// Returns top parent individual.
Chromosome Top(Population solutions)
{
std::sort(solutions.begin(), solutions.end(),
[](const Chromosome &p, const Chromosome &q)
{
// If either p or q is infeasible
if (p.constraints != utils::Approx(q.constraints)) {
return p.constraints < q.constraints;
}
return p.objective < q.objective;
}
);
return std::move(solutions[0]);
}
};
}
#endif |
parallelReadTiff.c | #include "tiffio.h"
#include <stdio.h>
#include <stdint.h>
#include "omp.h"
//mex -v COPTIMFLAGS="-O3 -fwrapv -DNDEBUG" CFLAGS='$CFLAGS -O3 -fopenmp' LDFLAGS='$LDFLAGS -O3 -fopenmp' '-I/global/home/groups/software/sl-7.x86_64/modules/libtiff/4.1.0/libtiff/' '-L/global/home/groups/software/sl-7.x86_64/modules/libtiff/4.1.0/libtiff/' -ltiff /clusterfs/fiona/matthewmueller/parallelTiffTesting/main.c
void DummyHandler(const char* module, const char* fmt, va_list ap)
{
// ignore errors and warnings
}
void* mallocDynamic(uint64_t x, uint64_t bits){
switch(bits){
case 8:
return malloc(x*sizeof(uint8_t));
case 16:
return malloc(x*sizeof(uint16_t));
case 32:
return malloc(x*sizeof(float));
case 64:
return malloc(x*sizeof(double));
default:
printf("Image is not 8/16 bit, single, or double. Using single.");
return malloc(x*sizeof(float));
}
}
void readTiffParallel(uint64_t x, uint64_t y, uint64_t z, char* fileName, void* tiff, uint64_t bits, uint64_t startSlice){
int32_t numWorkers = omp_get_max_threads();
int32_t batchSize = (z-1)/numWorkers+1;
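// batchSize = ceil(z / numWorkers): each worker reads one contiguous
// block of directories (slices) through its own TIFF handle.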
int32_t w;
#pragma omp parallel for
for(w = 0; w < numWorkers; w++){
TIFF* tif = TIFFOpen(fileName, "r");
void* buffer = mallocDynamic(x, bits);
for(int64_t dir = startSlice+(w*batchSize); dir < startSlice+((w+1)*batchSize); dir++){
if(dir>=z+startSlice) break;
TIFFSetDirectory(tif, (uint64_t)dir);
for (int64_t i = 0; i < y; i++)
{
//loading the data into a buffer
switch(bits){
case 8:
TIFFReadScanline(tif, (uint8_t*)buffer, i, 0);
// Map Values to flip x and y for MATLAB
for(int64_t j = 0; j < x; j++){
((uint8_t*)tiff)[((j*y)+i)+((dir-startSlice)*(x*y))] = ((uint8_t*)buffer)[j];
}
break;
case 16:
TIFFReadScanline(tif, (uint16_t*)buffer, i, 0);
// Map Values to flip x and y for MATLAB
for(int64_t j = 0; j < x; j++){
((uint16_t*)tiff)[((j*y)+i)+((dir-startSlice)*(x*y))] = ((uint16_t*)buffer)[j];
}
break;
case 32:
TIFFReadScanline(tif, (float*)buffer, i, 0);
// Map Values to flip x and y for MATLAB
for(int64_t j = 0; j < x; j++){
((float*)tiff)[((j*y)+i)+((dir-startSlice)*(x*y))] = ((float*)buffer)[j];
}
break;
case 64:
TIFFReadScanline(tif, (double*)buffer, i, 0);
// Map Values to flip x and y for MATLAB
for(int64_t j = 0; j < x; j++){
((double*)tiff)[((j*y)+i)+((dir-startSlice)*(x*y))] = ((double*)buffer)[j];
}
break;
}
}
}
free(buffer);
TIFFClose(tif);
}
}
void* readTiffParallelWrapper(char* fileName)
{
TIFFSetWarningHandler(DummyHandler);
TIFF* tif = TIFFOpen(fileName, "r");
if(!tif) return NULL;
uint64_t x = 1,y = 1,z = 1,bits = 1, startSlice = 0;
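// Note (assumption about the original intent): TIFFGetField() stores
// 16/32-bit values for these tags, so reading them into uint64_t
// variables that were initialized to small constants only yields the
// correct result on little-endian hosts.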
TIFFGetField(tif, TIFFTAG_IMAGEWIDTH, &x);
TIFFGetField(tif, TIFFTAG_IMAGELENGTH, &y);
uint16_t s = 0, m = 0, t = 1;
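// Exponential probe: grow the candidate directory index t by a factor of
// 8 until TIFFSetDirectory() fails, remembering the last valid index in s
// (the uint16_t overflow check caps the search at 65535 directories).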
while(TIFFSetDirectory(tif,t)){
s = t;
t *= 8;
if(s > t){
t = 65535;
printf("Number of slices > 32768");
break;
}
}
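// Binary search between s (known readable) and t for the last readable
// directory; the slice count is then z = s+1.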
while(s != t){
m = (s+t+1)/2;
if(TIFFSetDirectory(tif,m)){
s = m;
}
else{
if(m > 0) t = m-1;
else t = m;
}
}
z = s+1;
TIFFGetField(tif, TIFFTAG_BITSPERSAMPLE, &bits);
TIFFClose(tif);
uint64_t dim[3];
dim[0] = y;
dim[1] = x;
dim[2] = z;
if(bits == 8){
uint8_t* tiff = (uint8_t*)malloc(x*y*z*sizeof(uint8_t));
readTiffParallel(x,y,z,fileName, (void*)tiff, bits, startSlice);
return (void*)tiff;
}
else if(bits == 16){
uint16_t* tiff = (uint16_t*)malloc(x*y*z*sizeof(uint16_t));
readTiffParallel(x,y,z,fileName, (void*)tiff, bits, startSlice);
return (void*)tiff;
}
else if(bits == 32){
float* tiff = (float*)malloc(x*y*z*sizeof(float));
readTiffParallel(x,y,z,fileName, (void*)tiff, bits, startSlice);
return (void*)tiff;
}
else if(bits == 64){
double* tiff = (double*)malloc(x*y*z*sizeof(double));
readTiffParallel(x,y,z,fileName, (void*)tiff, bits, startSlice);
return (void*)tiff;
}
else{
return NULL;
}
}
|
ofmo-inter-frag.c | /**
* @file ofmo-inter-frag.c
* フラグメント電子状態計算のクーロン相互作用の近似レベルなどを
* 決めるモノマー間距離の計算などに関する関数群を定義している。
*
* TODO:
* モノマー間距離はdouble型ではなく、float型で十分では?
*
* */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include "ofmo-def.h"
#include "ofmo-data.h"
#include "ofmo-mserv-cont.h"
/* Van der Waals radii (1-offset, indexed by atomic number; in Angstrom
 * here, converted to Bohr by init_vdw()) */
static double Van_Der_Waals_Radius[] = {0.0,
1.20, 1.20,
1.37,1.45,1.45,1.50,1.50,1.40,1.35,1.30,
1.57,1.36,1.24,1.17,1.80,1.75,1.70,2.50,
2.50,2.50,2.50,2.50,2.50,2.50,2.50,2.50,
2.50,2.50,2.50,2.50,2.50,2.50,2.50,2.50,
2.30,2.50,
2.50,2.50,2.50,2.50,2.50,2.50,2.50,2.50,
2.50,2.50,2.50,2.50,2.50,2.50,2.50,2.50,
2.50,2.50,
2.50,2.50,2.50,2.50,2.50,2.50,2.50,2.50,
2.50,2.50,2.50,2.50,2.50,2.50,2.50,2.50,
2.50,2.50,2.50,2.50,2.50,2.50,2.50,2.50,
2.50,2.50,2.50,2.50,2.50,2.50,2.50,2.50,
2.50,2.50,2.50,2.50,2.50,2.50,2.50,2.50,
2.50,2.50,2.50,2.50,2.50,2.50 };
static void init_vdw() {
double inv_bohr;
int n, i;
inv_bohr = 1.e0 / BOHR_RADIUS;
n = sizeof(Van_Der_Waals_Radius) / sizeof(double);
for ( i=0; i<n; i++ ) Van_Der_Waals_Radius[i] *= inv_bohr;
}
/** Computes the inter-monomer distances that involve a given monomer
 *
 * For each monomer pair, the distance is the minimum over all atom pairs
 * of the interatomic distance scaled by the sum of the van der Waals
 * radii of the two atoms.
 * */
static int ofmo_calc_inter_fragment_distance_v0(
const int ifrag, double dist[] ) {
static int called = false;
static double *atom_x, *atom_y, *atom_z;
static int *atomic_number, **ifatom, *nfatom;
static int nfrag;
if ( !called ) {
int ierr;
ierr = ofmo_data_get_vals( "nfrag atn atx aty atz ifatom nfatom",
&nfrag, &atomic_number, &atom_x, &atom_y, &atom_z,
&ifatom, &nfatom );
init_vdw();
if ( ierr != 0 ) return -1;
called = true;
}
int jfrag;
#pragma omp parallel for
for ( jfrag=0; jfrag<nfrag; jfrag++ ) {
int iat, iatm, iatn, jat, jatm, jatn;
double rix, riy, riz, rijx, rijy, rijz, rij2, rij;
double vdw_min, vdwi, vdwj, vdw_dist;
if ( jfrag == ifrag ) {
dist[jfrag] = 0.e0;
} else {
vdw_min = HUGE_VAL;
for ( iat=0; iat<nfatom[ifrag]; iat++ ) {
iatm = ifatom[ifrag][iat];
iatn = atomic_number[iatm];
rix = atom_x[iatm];
riy = atom_y[iatm];
riz = atom_z[iatm];
vdwi = Van_Der_Waals_Radius[iatn];
for ( jat=0; jat<nfatom[jfrag]; jat++ ) {
jatm = ifatom[jfrag][jat];
jatn = atomic_number[jatm];
vdwj = Van_Der_Waals_Radius[jatn];
rijx = rix - atom_x[jatm];
rijy = riy - atom_y[jatm];
rijz = riz - atom_z[jatm];
rij2 = rijx*rijx + rijy*rijy + rijz*rijz;
rij = sqrt( rij2 );
vdw_dist = rij / (vdwi+vdwj);
if ( vdw_dist < vdw_min ) vdw_min = vdw_dist;
}
}
dist[jfrag] = vdw_min;
}
}
return 0;
}
static int *itmp1 = NULL;
static double *dtmp1 = NULL;
static double *dtmp2 = NULL;
static void dealloc() {
Free( itmp1 );
Free( dtmp1 );
Free( dtmp2 );
}
static int alloc( int nfrag ) {
static int called = false;
if ( !called ) {
dealloc();
itmp1 = (int*)malloc( sizeof(int) * nfrag );
dtmp1 = (double*)malloc( sizeof(double) * nfrag );
dtmp2 = (double*)malloc( sizeof(double) * nfrag );
atexit( dealloc );
called = true;
}
return 0;
}
/** Computes the inter-monomer distances.
 Builds the list of inter-monomer distances using the processes in a
 worker (communicator).
 It needs to be called only once, and from a single worker only.
 */
int ofmo_make_inter_frag_distance_list( MPI_Comm comm ) {
int nfrag, ierr, ifrag;
int myrank, nprocs;
double *inter_fragment_distance = NULL;
if ( ofmo_data_get_vals("nfrag", &nfrag ) != 0 ) return -1;
alloc( nfrag );
inter_fragment_distance = dtmp1;
MPI_Comm_rank( comm, &myrank );
MPI_Comm_size( comm, &nprocs );
/* Compute the inter-monomer distances */
for ( ifrag=myrank; ifrag<nfrag; ifrag+=nprocs ) {
ierr = ofmo_calc_inter_fragment_distance_v0( ifrag,
inter_fragment_distance );
if ( ierr != 0 ) return -1;
ofmo_worker_put( OFMO_DISTA, ifrag, inter_fragment_distance );
}
MPI_Barrier( comm );
return 0;
}
/* Is monomer ifrag in fragment (true) or not (false) */
static int is_in_fragment( const int ifrag,
const int nmonomer, const int monomer_list[] ) {
for ( int i=0; i<nmonomer; i++ ) {
if ( monomer_list[i] == ifrag ) return true;
}
return false;
}
/** Determines the approximation level used in the environment-potential calculation
 *
 * When the electronic state of a fragment (a monomer, dimer, etc.
 * (\f$x\f$)) is computed in an FMO calculation, the electrostatic
 * interaction term
 * @f${}^x\mbox{\boldmath$V$}_I@f$
 * from each surrounding monomer (\f$I\f$) has to be evaluated.
 * To reduce the amount of computation, this environment-potential term is
 * approximated according to the distance between the fragment whose
 * electronic state is being computed and the monomer for which the
 * environment potential is evaluated.
 * @f{eqnarray*}{
 * \left( {}^x\mbox{\boldmath$V$}_I \right)_{\mu\nu} &=&
 * \sum_{\sigma\lambda} (\mbox{\boldmath$D$}_{I})_{\sigma\lambda}
 * (\mu\nu|\sigma\lambda) \\
 * &\approx& \sum_{\sigma} \left( \mbox{\boldmath$D$}_I
 * \mbox{\boldmath$S$}_I \right)_{\sigma\sigma} (\mu\nu|\sigma\sigma) \\
 * &\approx& \sum_{A\in I} \int dr \phi_\mu(r) \frac{Q_A}{|r-A|}
 * \phi_\nu(r)
 * @f}
 *
 * Without approximation, four-center Coulomb integrals have to be
 * computed, whose cost is of the same order as that of the ordinary
 * two-electron integrals. With the first approximation (the pop
 * approximation) only the much cheaper three-center Coulomb integrals are
 * needed, and with the second approximation (the point-charge
 * approximation) only two-center Coulomb integrals, a kind of one-electron
 * integral, remain. The switch between these levels is made according to
 * the distance between the fragment and the monomer.
 * Inside this function, the list of approximation levels of the
 * environment potential between the fragment specified by \c nmonomer and
 * \c monomer_list[] and the surrounding monomers is created. This list
 * can be queried with the \c ofmo_get_approx_level function.
 * Furthermore, through the pointer arguments of this function, the number
 * of partner monomers that require four-center Coulomb integrals and the
 * list of their indices (\c *nifc4c, \c joblist_ifc4c[]), as well as the
 * number of partner monomers that require three-center Coulomb integrals
 * and the list of their indices (\c *nifc3c, \c joblist_ifc3c[]), are
 * returned to the caller.
 *
 * @param[in] nmonomer Number of monomers that compose the fragment
 * @param[in] monomer_list[] List of the indices of the monomers that compose the fragment
 * @param[out] *nifc4c Number of non-approximated partner monomers that require four-center integrals
 * @param[out] joblist_ifc4c[] List of the indices of the non-approximated partner monomers that require four-center integrals
 * @param[out] *nifc3c Number of pop-approximated partner monomers that require three-center integrals
 * @param[out] joblist_ifc3c[] List of the indices of the pop-approximated partner monomers that require three-center integrals
 *
 * @ingroup ofmo-calc
 *
 * */
int ofmo_make_approx_level( const int nmonomer, const int monomer_list[],
int *nifc4c, int joblist_ifc4c[],
int *nifc3c, int joblist_ifc3c[],
MPI_Comm comm ) {
static int nfrag, called = false;
static double laop, lptc;
if ( !called ) {
ofmo_data_get_vals("nfrag laop lptc", &nfrag, &laop, &lptc);
alloc( nfrag );
called = true;
}
double *rmin = dtmp1, *dist = dtmp2;
int *approx_level = itmp1;
int i, ifrag, jfrag, idum[2], root=0;
/* make rmin list for fragment */
int myrank;
MPI_Comm_rank( comm, &myrank );
if ( myrank == root ) {
ifrag = monomer_list[0];
ofmo_worker_get( OFMO_DISTA, ifrag, rmin );
for ( i=1; i<nmonomer; i++ ) {
ifrag = monomer_list[i];
ofmo_worker_get( OFMO_DISTA, ifrag, dist );
for ( jfrag=0; jfrag<nfrag; jfrag++ ) {
if ( rmin[jfrag] > dist[jfrag] ) rmin[jfrag] = dist[jfrag];
}
}
/* */
int n4, n3;
n4 = n3 = 0;
for ( ifrag=0; ifrag<nfrag; ifrag++ ) {
if ( is_in_fragment( ifrag, nmonomer, monomer_list ) ) {
approx_level[ifrag] = OFMO_IFC0C;
} else if ( rmin[ifrag] < laop ) {
joblist_ifc4c[n4] = ifrag;
approx_level[ifrag] = OFMO_IFC4C;
n4++;
} else if ( rmin[ifrag] < lptc ) {
joblist_ifc3c[n3] = ifrag;
approx_level[ifrag] = OFMO_IFC3C;
n3++;
} else {
approx_level[ifrag] = OFMO_IFC2C;
}
}
*nifc4c = n4;
*nifc3c = n3;
idum[0] = n4;
idum[1] = n3;
}
MPI_Bcast( idum, 2, MPI_INT, root, comm );
if ( myrank != root ) {
*nifc4c = idum[0];
*nifc3c = idum[1];
}
MPI_Bcast( joblist_ifc4c, *nifc4c, MPI_INT, root, comm );
MPI_Bcast( joblist_ifc3c, *nifc3c, MPI_INT, root, comm );
MPI_Bcast( approx_level, nfrag, MPI_INT, root, comm );
return 0;
}
/** Returns the approximation level of the environment potential between
 * the fragment currently being computed and a specified monomer
 *
 * Returns the approximation level of the environment potential between
 * the fragment given to the most recent call of \c ofmo_make_approx_level
 * and the monomer specified by the argument \c ifrag of this function.
 *
 * @param[in] ifrag Index of the partner monomer
 *
 * @return Approximation level of the environment potential
 *
 * @ingroup ofmo-calc
 *
 * */
int ofmo_get_approx_level( const int ifrag ) {
return itmp1[ifrag];
}
|
dependences.c | // RUN: %libomp-compile-and-run | %sort-threads | FileCheck %s
// REQUIRES: ompt
// UNSUPPORTED: gcc-4, gcc-5, gcc-6, gcc-7
#include "callback.h"
#include <omp.h>
#include <math.h>
#include <unistd.h>
int main() {
int x = 0;
#pragma omp parallel num_threads(2)
{
#pragma omp master
{
print_ids(0);
printf("%" PRIu64 ": address of x: %p\n", ompt_get_thread_data()->value,
&x);
#pragma omp task depend(out : x)
{
x++;
delay(100);
}
print_fuzzy_address(1);
print_ids(0);
#pragma omp task depend(in : x)
{ x = -1; }
print_ids(0);
}
}
x++;
return 0;
}
// Check if libomp supports the callbacks for this test.
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_task_create'
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_dependences'
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_task_dependence'
// CHECK: {{^}}0: NULL_POINTER=[[NULL:.*$]]
// make sure initial data pointers are null
// CHECK-NOT: 0: new_task_data initially not null
// CHECK: {{^}}[[MASTER_ID:[0-9]+]]: ompt_event_implicit_task_begin:
// CHECK-SAME: parallel_id=[[PARALLEL_ID:[0-9]+]],
// CHECK-SAME: task_id=[[IMPLICIT_TASK_ID:[0-9]+]]
// CHECK: {{^}}[[MASTER_ID]]: task level 0: parallel_id=[[PARALLEL_ID]],
// CHECK-SAME: task_id=[[IMPLICIT_TASK_ID]], exit_frame=[[EXIT:0x[0-f]+]],
// CHECK-SAME: reenter_frame=[[NULL]]
// CHECK: {{^}}[[MASTER_ID]]: address of x: [[ADDRX:0x[0-f]+]]
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_task_create:
// CHECK-SAME: parent_task_id={{[0-9]+}}, parent_task_frame.exit=[[EXIT]],
// CHECK-SAME: parent_task_frame.reenter={{0x[0-f]+}},
// CHECK-SAME: new_task_id=[[FIRST_TASK:[0-f]+]],
// CHECK-SAME: codeptr_ra=[[RETURN_ADDRESS:0x[0-f]+]]{{[0-f][0-f]}},
// CHECK-SAME: task_type=ompt_task_explicit=4, has_dependences=yes
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_dependences:
// CHECK-SAME: task_id=[[FIRST_TASK]], deps=[([[ADDRX]],
// CHECK-SAME: ompt_dependence_type_inout)], ndeps=1
// CHECK: {{^}}[[MASTER_ID]]: fuzzy_address={{.*}}[[RETURN_ADDRESS]]
// CHECK: {{^}}[[MASTER_ID]]: task level 0: parallel_id=[[PARALLEL_ID]],
// CHECK-SAME: task_id=[[IMPLICIT_TASK_ID]], exit_frame=[[EXIT]],
// CHECK-SAME: reenter_frame=[[NULL]]
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_task_create:
// CHECK-SAME: parent_task_id={{[0-9]+}}, parent_task_frame.exit=[[EXIT]],
// CHECK-SAME: parent_task_frame.reenter={{0x[0-f]+}},
// CHECK-SAME: new_task_id=[[SECOND_TASK:[0-f]+]], codeptr_ra={{0x[0-f]+}},
// CHECK-SAME: task_type=ompt_task_explicit=4, has_dependences=yes
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_dependences:
// CHECK-SAME: task_id=[[SECOND_TASK]], deps=[([[ADDRX]],
// CHECK-SAME: ompt_dependence_type_in)], ndeps=1
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_task_dependence_pair:
// CHECK-SAME: first_task_id=[[FIRST_TASK]], second_task_id=[[SECOND_TASK]]
// CHECK: {{^}}[[MASTER_ID]]: task level 0: parallel_id=[[PARALLEL_ID]],
// CHECK-SAME: task_id=[[IMPLICIT_TASK_ID]], exit_frame=[[EXIT]],
// CHECK-SAME: reenter_frame=[[NULL]]
|
sh.c | #include <emproc/sh.h>
#ifndef OPENCL_MODE
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <math.h>
#endif
#define PI 3.1415926535897932384626433832795028841971693993751058
#define PI4 12.566370614359172953850573533118011536788677597500423
#define PI16 50.265482457436691815402294132472046147154710390001693
#define PI64 201.06192982974676726160917652988818458861884156000677
#define SQRT_PI 1.7724538509055160272981674833411451827975494561223871
/* 1.0 / (2.0 * SQRT_PI) */
#define K0 0.28209479177
/* sqrt(3.0 / PI4) */
#define K1 0.4886025119
/* sqrt(15.0 / PI4) */
#define K2 1.09254843059
/* -sqrt(15.0 / PI4) */
#define K3 -1.09254843059
/* sqrt(5.0 / PI16) */
#define K4 0.31539156525
/* sqrt(15.0 / PI16) */
#define K5 0.54627421529
/* -sqrt(70.0 / PI64) */
#define K6 -0.59004358992
/* sqrt(105.0 / PI4) */
#define K7 2.89061144264
/* -sqrt(21.0 / PI16) */
#define K8 -0.64636036822
/* sqrt(7.0 / PI16) */
#define K9 0.37317633259
/* -sqrt(42.0 / PI64) */
#define K10 -0.45704579946
/* sqrt(105.0 / PI16) */
#define K11 1.44530572132
/* -sqrt(70.0 / PI64) */
#define K12 -0.59004358992
/* 3.0 * sqrt(35.0 / PI16) */
#define K13 2.5033429418
/* -3.0 * sqrt(70.0 / PI64) */
#define K14 -1.77013076978
/* 3.0 * sqrt(5.0 / PI16) */
#define K15 0.94617469575
/* -3.0 * sqrt(10.0 / PI64) */
#define K16 -1.33809308711
/* 3.0 * sqrt(5.0 / PI64) */
#define K17 0.47308734787
/* 3.0 * sqrt(35.0 / (4.0 * PI64)) */
#define K18 0.62583573544
void sh_eval_basis5(double* sh_basis, GLOBAL const float* dir)
{
const double x = (double)dir[0];
const double y = (double)dir[1];
const double z = (double)dir[2];
const double x2 = x*x;
const double y2 = y*y;
const double z2 = z*z;
const double z3 = z*z*z;
const double x4 = x*x*x*x;
const double y4 = y*y*y*y;
const double z4 = z*z*z*z;
/* Equations based on data from: http://ppsloan.org/publications/StupidSH36.pdf */
sh_basis[0] = K0;
sh_basis[1] = -K1 * y;
sh_basis[2] = K1 * z;
sh_basis[3] = -K1 * x;
sh_basis[4] = K2 * y * x;
sh_basis[5] = K3 * y * z;
sh_basis[6] = K4 * (3.0 * z2 - 1.0);
sh_basis[7] = K3 * x * z;
sh_basis[8] = K5 * (x2 - y2);
sh_basis[9] = K6 * y * (3 * x2 - y2);
sh_basis[10] = K7 * y * x * z;
sh_basis[11] = K8 * y * (-1.0 + 5.0 * z2);
sh_basis[12] = K9 * (5.0 * z3 - 3.0 * z);
sh_basis[13] = K10 * x * (-1.0 + 5.0 * z2);
sh_basis[14] = K11 * (x2 - y2) * z;
sh_basis[15] = K12 * x * (x2 - 3.0 * y2);
sh_basis[16] = K13 * x * y * (x2 - y2);
sh_basis[17] = K14 * y * z * (3.0 * x2 - y2);
sh_basis[18] = K15 * y * x * (-1.0 + 7.0 * z2);
sh_basis[19] = K16 * y * z * (-3.0 + 7.0 * z2);
sh_basis[20] = (105.0 * z4 -90.0 * z2 + 9.0) / (16.0 * SQRT_PI);
sh_basis[21] = K16 * x * z * (-3.0 + 7.0 * z2);
sh_basis[22] = K17 * (x2 - y2) * (-1.0 + 7.0 * z2);
sh_basis[23] = K14 * x * z * (x2 - 3.0 * y2);
sh_basis[24] = K18 * (x4 - 6.0 * y2 * x2 + y4);
}
#ifndef OPENCL_MODE
void sh_coeffs(double sh_coeffs[SH_COEFF_NUM][3], struct envmap* em, float* nsa_idx)
{
const size_t face_sz = envmap_face_size(em);
memset(sh_coeffs, 0, SH_COEFF_NUM * 3 * sizeof(double));
#ifndef WITH_OPENMP
float* nsa_ptr = nsa_idx;
#endif
double weight_accum = 0.0;
for (int face = 0; face < 6; ++face) {
#ifdef WITH_OPENMP
#pragma omp parallel for
#endif
for (size_t ydst = 0; ydst < face_sz; ++ydst) {
for (size_t xdst = 0; xdst < face_sz; ++xdst) {
#ifdef WITH_OPENMP
float* nsa_ptr = nsa_idx + ((face * face_sz * face_sz) + ydst * face_sz + xdst) * 4;
#endif
/* Current pixel values */
uint8_t* src_ptr = envmap_pixel_ptr(em, xdst, ydst, face);
const double rr = (double)src_ptr[0] / 255.0;
const double gg = (double)src_ptr[1] / 255.0;
const double bb = (double)src_ptr[2] / 255.0;
/* Calculate SH Basis */
double sh_basis[SH_COEFF_NUM];
sh_eval_basis5(sh_basis, nsa_ptr);
const double weight = (double)nsa_ptr[3];
for (uint8_t ii = 0; ii < SH_COEFF_NUM; ++ii) {
#ifdef WITH_OPENMP
#pragma omp atomic update
#endif
sh_coeffs[ii][0] += rr * sh_basis[ii] * weight;
#ifdef WITH_OPENMP
#pragma omp atomic update
#endif
sh_coeffs[ii][1] += gg * sh_basis[ii] * weight;
#ifdef WITH_OPENMP
#pragma omp atomic update
#endif
sh_coeffs[ii][2] += bb * sh_basis[ii] * weight;
}
weight_accum += weight;
#ifndef WITH_OPENMP
/* Forward index ptr */
nsa_ptr += 4;
#endif
}
}
}
/*
 * Normalization.
 * This is not strictly necessary, because usually PI4 - weight_accum ~= 0.000003,
 * so it changes almost nothing, but it costs little to be more correct.
 */
const double norm = PI4 / weight_accum;
for (uint8_t ii = 0; ii < SH_COEFF_NUM; ++ii) {
sh_coeffs[ii][0] *= norm;
sh_coeffs[ii][1] *= norm;
sh_coeffs[ii][2] *= norm;
}
}
void sh_irradiance(float irr[3], double sh_rgb[SH_COEFF_NUM][3], float dir[3])
{
/* Eval basis for current direction */
double sh_basis[SH_COEFF_NUM];
sh_eval_basis5(sh_basis, dir);
/* Calculate pixel value using sh */
double rgb[3] = {0.0f, 0.0f, 0.0f};
/* Band 0 (factor 1.0) */
rgb[0] += sh_rgb[0][0] * sh_basis[0] * 1.0f;
rgb[1] += sh_rgb[0][1] * sh_basis[0] * 1.0f;
rgb[2] += sh_rgb[0][2] * sh_basis[0] * 1.0f;
/* Band 1 (factor 2/3). */
uint8_t ii = 1;
for (; ii < 4; ++ii) {
rgb[0] += sh_rgb[ii][0] * sh_basis[ii] * (2.0f/3.0f);
rgb[1] += sh_rgb[ii][1] * sh_basis[ii] * (2.0f/3.0f);
rgb[2] += sh_rgb[ii][2] * sh_basis[ii] * (2.0f/3.0f);
}
/* Band 2 (factor 1/4). */
for (; ii < 9; ++ii) {
rgb[0] += sh_rgb[ii][0] * sh_basis[ii] * (1.0f/4.0f);
rgb[1] += sh_rgb[ii][1] * sh_basis[ii] * (1.0f/4.0f);
rgb[2] += sh_rgb[ii][2] * sh_basis[ii] * (1.0f/4.0f);
}
/* Band 3 (factor 0). */
ii = 16;
/* Band 4 (factor -1/24). */
for (; ii < 25; ++ii) {
rgb[0] += sh_rgb[ii][0] * sh_basis[ii] * (-1.0f/24.0f);
rgb[1] += sh_rgb[ii][1] * sh_basis[ii] * (-1.0f/24.0f);
rgb[2] += sh_rgb[ii][2] * sh_basis[ii] * (-1.0f/24.0f);
}
/* Store output */
irr[0] = (float)rgb[0];
irr[1] = (float)rgb[1];
irr[2] = (float)rgb[2];
}
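/* Usage sketch (illustrative, not part of the original source): the
 * typical pipeline is to project the environment map once with
 * sh_coeffs() and then evaluate sh_irradiance() per shading direction.
 * "em" and the per-texel normal/solid-angle table "nsa" (4 floats per
 * texel) are assumed to be prepared by the caller, exactly as
 * sh_coeffs() expects. */
static void sh_irradiance_example(struct envmap* em, float* nsa, float irr_up[3])
{
    double coeffs[SH_COEFF_NUM][3];
    float up[3] = { 0.0f, 1.0f, 0.0f }; /* evaluate for the +Y direction */
    sh_coeffs(coeffs, em, nsa);
    sh_irradiance(irr_up, coeffs, up);
}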
#endif
|
decorate.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% DDDD EEEEE CCCC OOO RRRR AAA TTTTT EEEEE %
% D D E C O O R R A A T E %
% D D EEE C O O RRRR AAAAA T EEE %
% D D E C O O R R A A T E %
% DDDD EEEEE CCCC OOO R R A A T EEEEE %
% %
% %
% MagickCore Image Decoration Methods %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2016 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% http://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite.h"
#include "MagickCore/decorate.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/image.h"
#include "MagickCore/memory_.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/quantum.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/resource_.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/transform.h"
/*
Define declarations.
*/
#define AccentuateModulate ScaleCharToQuantum(80)
#define HighlightModulate ScaleCharToQuantum(125)
#define ShadowModulate ScaleCharToQuantum(135)
#define DepthModulate ScaleCharToQuantum(185)
#define TroughModulate ScaleCharToQuantum(110)
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% B o r d e r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% BorderImage() surrounds the image with a border of the color defined by
% the bordercolor member of the image structure. The width and height
% of the border are defined by the corresponding members of the border_info
% structure.
%
% The format of the BorderImage method is:
%
% Image *BorderImage(const Image *image,const RectangleInfo *border_info,
% const CompositeOperator compose,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o border_info: define the width and height of the border.
%
% o compose: the composite operator.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *BorderImage(const Image *image,
const RectangleInfo *border_info,const CompositeOperator compose,
ExceptionInfo *exception)
{
Image
*border_image,
*clone_image;
FrameInfo
frame_info;
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(border_info != (RectangleInfo *) NULL);
frame_info.width=image->columns+(border_info->width << 1);
frame_info.height=image->rows+(border_info->height << 1);
frame_info.x=(ssize_t) border_info->width;
frame_info.y=(ssize_t) border_info->height;
frame_info.inner_bevel=0;
frame_info.outer_bevel=0;
clone_image=CloneImage(image,0,0,MagickTrue,exception);
if (clone_image == (Image *) NULL)
return((Image *) NULL);
clone_image->alpha_color=image->border_color;
border_image=FrameImage(clone_image,&frame_info,compose,exception);
clone_image=DestroyImage(clone_image);
if (border_image != (Image *) NULL)
border_image->alpha_color=image->alpha_color;
return(border_image);
}
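/*
  Usage sketch (illustrative, not part of MagickCore): surround an image
  with a 5x5 border using the image's border color.

      RectangleInfo border_info = { 5, 5, 0, 0 };  // width, height, x, y
      Image *bordered = BorderImage(image, &border_info, OverCompositeOp,
        exception);
*/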
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% F r a m e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% FrameImage() adds a simulated three-dimensional border around the image.
% The color of the border is defined by the alpha_color member of image.
% Members width and height of frame_info specify the border width of the
% vertical and horizontal sides of the frame. Members inner and outer
% indicate the width of the inner and outer shadows of the frame.
%
% The format of the FrameImage method is:
%
% Image *FrameImage(const Image *image,const FrameInfo *frame_info,
% const CompositeOperator compose,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o frame_info: Define the width and height of the frame and its bevels.
%
% o compose: the composite operator.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *FrameImage(const Image *image,const FrameInfo *frame_info,
const CompositeOperator compose,ExceptionInfo *exception)
{
#define FrameImageTag "Frame/Image"
CacheView
*image_view,
*frame_view;
Image
*frame_image;
MagickBooleanType
status;
MagickOffsetType
progress;
PixelInfo
accentuate,
highlight,
matte,
shadow,
trough;
register ssize_t
x;
size_t
bevel_width,
height,
width;
ssize_t
y;
/*
Check frame geometry.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(frame_info != (FrameInfo *) NULL);
if ((frame_info->outer_bevel < 0) || (frame_info->inner_bevel < 0))
ThrowImageException(OptionError,"FrameIsLessThanImageSize");
bevel_width=(size_t) (frame_info->outer_bevel+frame_info->inner_bevel);
x=(ssize_t) frame_info->width-frame_info->x-bevel_width;
y=(ssize_t) frame_info->height-frame_info->y-bevel_width;
  if ((x < (ssize_t) image->columns) || (y < (ssize_t) image->rows))
ThrowImageException(OptionError,"FrameIsLessThanImageSize");
/*
Initialize framed image attributes.
*/
frame_image=CloneImage(image,frame_info->width,frame_info->height,MagickTrue,
exception);
if (frame_image == (Image *) NULL)
return((Image *) NULL);
if (SetImageStorageClass(frame_image,DirectClass,exception) == MagickFalse)
{
frame_image=DestroyImage(frame_image);
return((Image *) NULL);
}
if ((IsPixelInfoGray(&frame_image->border_color) == MagickFalse) &&
(IsGrayColorspace(frame_image->colorspace) != MagickFalse))
(void) SetImageColorspace(frame_image,sRGBColorspace,exception);
if ((frame_image->alpha_color.alpha_trait != UndefinedPixelTrait) &&
(frame_image->alpha_trait == UndefinedPixelTrait))
(void) SetImageAlpha(frame_image,OpaqueAlpha,exception);
frame_image->page=image->page;
if ((image->page.width != 0) && (image->page.height != 0))
{
frame_image->page.width+=frame_image->columns-image->columns;
frame_image->page.height+=frame_image->rows-image->rows;
}
/*
Initialize 3D effects color.
*/
matte=image->alpha_color;
accentuate=matte;
accentuate.red=(double) (QuantumScale*((QuantumRange-
AccentuateModulate)*matte.red+(QuantumRange*AccentuateModulate)));
accentuate.green=(double) (QuantumScale*((QuantumRange-
AccentuateModulate)*matte.green+(QuantumRange*AccentuateModulate)));
accentuate.blue=(double) (QuantumScale*((QuantumRange-
AccentuateModulate)*matte.blue+(QuantumRange*AccentuateModulate)));
accentuate.black=(double) (QuantumScale*((QuantumRange-
AccentuateModulate)*matte.black+(QuantumRange*AccentuateModulate)));
accentuate.alpha=matte.alpha;
highlight=matte;
highlight.red=(double) (QuantumScale*((QuantumRange-
HighlightModulate)*matte.red+(QuantumRange*HighlightModulate)));
highlight.green=(double) (QuantumScale*((QuantumRange-
HighlightModulate)*matte.green+(QuantumRange*HighlightModulate)));
highlight.blue=(double) (QuantumScale*((QuantumRange-
HighlightModulate)*matte.blue+(QuantumRange*HighlightModulate)));
highlight.black=(double) (QuantumScale*((QuantumRange-
HighlightModulate)*matte.black+(QuantumRange*HighlightModulate)));
highlight.alpha=matte.alpha;
shadow=matte;
shadow.red=QuantumScale*matte.red*ShadowModulate;
shadow.green=QuantumScale*matte.green*ShadowModulate;
shadow.blue=QuantumScale*matte.blue*ShadowModulate;
shadow.black=QuantumScale*matte.black*ShadowModulate;
shadow.alpha=matte.alpha;
trough=matte;
trough.red=QuantumScale*matte.red*TroughModulate;
trough.green=QuantumScale*matte.green*TroughModulate;
trough.blue=QuantumScale*matte.blue*TroughModulate;
trough.black=QuantumScale*matte.black*TroughModulate;
trough.alpha=matte.alpha;
status=MagickTrue;
progress=0;
image_view=AcquireVirtualCacheView(image,exception);
frame_view=AcquireAuthenticCacheView(frame_image,exception);
height=(size_t) (frame_info->outer_bevel+(frame_info->y-bevel_width)+
frame_info->inner_bevel);
if (height != 0)
{
register ssize_t
x;
register Quantum
*magick_restrict q;
/*
Draw top of ornamental border.
*/
q=QueueCacheViewAuthenticPixels(frame_view,0,0,frame_image->columns,
height,exception);
if (q != (Quantum *) NULL)
{
/*
Draw top of ornamental border.
*/
for (y=0; y < (ssize_t) frame_info->outer_bevel; y++)
{
for (x=0; x < (ssize_t) (frame_image->columns-y); x++)
{
if (x < y)
SetPixelViaPixelInfo(frame_image,&highlight,q);
else
SetPixelViaPixelInfo(frame_image,&accentuate,q);
q+=GetPixelChannels(frame_image);
}
for ( ; x < (ssize_t) frame_image->columns; x++)
{
SetPixelViaPixelInfo(frame_image,&shadow,q);
q+=GetPixelChannels(frame_image);
}
}
for (y=0; y < (ssize_t) (frame_info->y-bevel_width); y++)
{
for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
{
SetPixelViaPixelInfo(frame_image,&highlight,q);
q+=GetPixelChannels(frame_image);
}
width=frame_image->columns-2*frame_info->outer_bevel;
for (x=0; x < (ssize_t) width; x++)
{
SetPixelViaPixelInfo(frame_image,&matte,q);
q+=GetPixelChannels(frame_image);
}
for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
{
SetPixelViaPixelInfo(frame_image,&shadow,q);
q+=GetPixelChannels(frame_image);
}
}
for (y=0; y < (ssize_t) frame_info->inner_bevel; y++)
{
for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
{
SetPixelViaPixelInfo(frame_image,&highlight,q);
q+=GetPixelChannels(frame_image);
}
for (x=0; x < (ssize_t) (frame_info->x-bevel_width); x++)
{
SetPixelViaPixelInfo(frame_image,&matte,q);
q+=GetPixelChannels(frame_image);
}
        width=image->columns+((size_t) frame_info->inner_bevel << 1)-y;
for (x=0; x < (ssize_t) width; x++)
{
if (x < y)
SetPixelViaPixelInfo(frame_image,&shadow,q);
else
SetPixelViaPixelInfo(frame_image,&trough,q);
q+=GetPixelChannels(frame_image);
}
for ( ; x < (ssize_t) (image->columns+2*frame_info->inner_bevel); x++)
{
SetPixelViaPixelInfo(frame_image,&highlight,q);
q+=GetPixelChannels(frame_image);
}
width=frame_info->width-frame_info->x-image->columns-bevel_width;
for (x=0; x < (ssize_t) width; x++)
{
SetPixelViaPixelInfo(frame_image,&matte,q);
q+=GetPixelChannels(frame_image);
}
for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
{
SetPixelViaPixelInfo(frame_image,&shadow,q);
q+=GetPixelChannels(frame_image);
}
}
(void) SyncCacheViewAuthenticPixels(frame_view,exception);
}
}
/*
Draw sides of ornamental border.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(progress,status) \
magick_threads(image,frame_image,1,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register ssize_t
x;
register Quantum
*magick_restrict q;
size_t
width;
/*
Initialize scanline with matte color.
*/
if (status == MagickFalse)
continue;
q=QueueCacheViewAuthenticPixels(frame_view,0,frame_info->y+y,
frame_image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
{
SetPixelViaPixelInfo(frame_image,&highlight,q);
q+=GetPixelChannels(frame_image);
}
for (x=0; x < (ssize_t) (frame_info->x-bevel_width); x++)
{
SetPixelViaPixelInfo(frame_image,&matte,q);
q+=GetPixelChannels(frame_image);
}
for (x=0; x < (ssize_t) frame_info->inner_bevel; x++)
{
SetPixelViaPixelInfo(frame_image,&shadow,q);
q+=GetPixelChannels(frame_image);
}
/*
Set frame interior pixels.
*/
{
register const Quantum
*p;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
register ssize_t
i;
if (GetPixelReadMask(image,q) == 0)
{
SetPixelBackgoundColor(frame_image,q);
p+=GetPixelChannels(image);
q+=GetPixelChannels(frame_image);
continue;
}
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel=GetPixelChannelChannel(image,i);
PixelTrait traits=GetPixelChannelTraits(image,channel);
PixelTrait frame_traits=GetPixelChannelTraits(frame_image,channel);
if ((traits == UndefinedPixelTrait) ||
(frame_traits == UndefinedPixelTrait))
continue;
SetPixelChannel(frame_image,channel,p[i],q);
}
SetPixelRed(frame_image,GetPixelRed(image,p),q);
SetPixelGreen(frame_image,GetPixelGreen(image,p),q);
SetPixelBlue(frame_image,GetPixelBlue(image,p),q);
SetPixelAlpha(frame_image,GetPixelAlpha(image,p),q);
p+=GetPixelChannels(image);
q+=GetPixelChannels(frame_image);
}
}
for (x=0; x < (ssize_t) frame_info->inner_bevel; x++)
{
SetPixelViaPixelInfo(frame_image,&highlight,q);
q+=GetPixelChannels(frame_image);
}
width=frame_info->width-frame_info->x-image->columns-bevel_width;
for (x=0; x < (ssize_t) width; x++)
{
SetPixelViaPixelInfo(frame_image,&matte,q);
q+=GetPixelChannels(frame_image);
}
for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
{
SetPixelViaPixelInfo(frame_image,&shadow,q);
q+=GetPixelChannels(frame_image);
}
if (SyncCacheViewAuthenticPixels(frame_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_FrameImage)
#endif
proceed=SetImageProgress(image,FrameImageTag,progress++,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
height=(size_t) (frame_info->inner_bevel+frame_info->height-
frame_info->y-image->rows-bevel_width+frame_info->outer_bevel);
if (height != 0)
{
register ssize_t
x;
register Quantum
*magick_restrict q;
/*
Draw bottom of ornamental border.
*/
q=QueueCacheViewAuthenticPixels(frame_view,0,(ssize_t) (frame_image->rows-
height),frame_image->columns,height,exception);
if (q != (Quantum *) NULL)
{
/*
Draw bottom of ornamental border.
*/
for (y=frame_info->inner_bevel-1; y >= 0; y--)
{
for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
{
SetPixelViaPixelInfo(frame_image,&highlight,q);
q+=GetPixelChannels(frame_image);
}
for (x=0; x < (ssize_t) (frame_info->x-bevel_width); x++)
{
SetPixelViaPixelInfo(frame_image,&matte,q);
q+=GetPixelChannels(frame_image);
}
for (x=0; x < y; x++)
{
SetPixelViaPixelInfo(frame_image,&shadow,q);
q+=GetPixelChannels(frame_image);
}
for ( ; x < (ssize_t) (image->columns+2*frame_info->inner_bevel); x++)
{
if (x >= (ssize_t) (image->columns+2*frame_info->inner_bevel-y))
SetPixelViaPixelInfo(frame_image,&highlight,q);
else
SetPixelViaPixelInfo(frame_image,&accentuate,q);
q+=GetPixelChannels(frame_image);
}
width=frame_info->width-frame_info->x-image->columns-bevel_width;
for (x=0; x < (ssize_t) width; x++)
{
SetPixelViaPixelInfo(frame_image,&matte,q);
q+=GetPixelChannels(frame_image);
}
for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
{
SetPixelViaPixelInfo(frame_image,&shadow,q);
q+=GetPixelChannels(frame_image);
}
}
height=frame_info->height-frame_info->y-image->rows-bevel_width;
for (y=0; y < (ssize_t) height; y++)
{
for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
{
SetPixelViaPixelInfo(frame_image,&highlight,q);
q+=GetPixelChannels(frame_image);
}
width=frame_image->columns-2*frame_info->outer_bevel;
for (x=0; x < (ssize_t) width; x++)
{
SetPixelViaPixelInfo(frame_image,&matte,q);
q+=GetPixelChannels(frame_image);
}
for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
{
SetPixelViaPixelInfo(frame_image,&shadow,q);
q+=GetPixelChannels(frame_image);
}
}
for (y=frame_info->outer_bevel-1; y >= 0; y--)
{
for (x=0; x < y; x++)
{
SetPixelViaPixelInfo(frame_image,&highlight,q);
q+=GetPixelChannels(frame_image);
}
for ( ; x < (ssize_t) frame_image->columns; x++)
{
if (x >= (ssize_t) (frame_image->columns-y))
SetPixelViaPixelInfo(frame_image,&shadow,q);
else
SetPixelViaPixelInfo(frame_image,&trough,q);
q+=GetPixelChannels(frame_image);
}
}
(void) SyncCacheViewAuthenticPixels(frame_view,exception);
}
}
frame_view=DestroyCacheView(frame_view);
image_view=DestroyCacheView(image_view);
x=(ssize_t) (frame_info->outer_bevel+(frame_info->x-bevel_width)+
frame_info->inner_bevel);
y=(ssize_t) (frame_info->outer_bevel+(frame_info->y-bevel_width)+
frame_info->inner_bevel);
if (status != MagickFalse)
status=CompositeImage(frame_image,image,compose,MagickTrue,x,y,
exception);
if (status == MagickFalse)
frame_image=DestroyImage(frame_image);
return(frame_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R a i s e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RaiseImage() creates a simulated three-dimensional button-like effect
% by lightening and darkening the edges of the image. Members width and
% height of raise_info define the width of the vertical and horizontal
% edge of the effect.
%
% The format of the RaiseImage method is:
%
% MagickBooleanType RaiseImage(const Image *image,
% const RectangleInfo *raise_info,const MagickBooleanType raise,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o raise_info: Define the width and height of the raise area.
%
% o raise: A value other than zero creates a 3-D raise effect,
% otherwise it has a lowered effect.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType RaiseImage(Image *image,
const RectangleInfo *raise_info,const MagickBooleanType raise,
ExceptionInfo *exception)
{
#define AccentuateFactor ScaleCharToQuantum(135)
#define HighlightFactor ScaleCharToQuantum(190)
#define ShadowFactor ScaleCharToQuantum(190)
#define RaiseImageTag "Raise/Image"
#define TroughFactor ScaleCharToQuantum(135)
CacheView
*image_view;
MagickBooleanType
status;
MagickOffsetType
progress;
Quantum
foreground,
background;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(raise_info != (RectangleInfo *) NULL);
if ((image->columns <= (raise_info->width << 1)) ||
(image->rows <= (raise_info->height << 1)))
ThrowBinaryException(OptionError,"ImageSizeMustExceedBevelWidth",
image->filename);
foreground=QuantumRange;
background=(Quantum) 0;
if (raise == MagickFalse)
{
foreground=(Quantum) 0;
background=QuantumRange;
}
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
/*
Raise image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(progress,status) \
magick_threads(image,image,1,1)
#endif
for (y=0; y < (ssize_t) raise_info->height; y++)
{
register ssize_t
i,
x;
register Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < y; x++)
{
if (GetPixelReadMask(image,q) == 0)
{
q+=GetPixelChannels(image);
continue;
}
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel=GetPixelChannelChannel(image,i);
PixelTrait traits=GetPixelChannelTraits(image,channel);
if ((traits & UpdatePixelTrait) == 0)
continue;
q[i]=ClampToQuantum(QuantumScale*((double) q[i]*HighlightFactor+(double)
foreground*(QuantumRange-HighlightFactor)));
}
q+=GetPixelChannels(image);
}
for ( ; x < (ssize_t) (image->columns-y); x++)
{
if (GetPixelReadMask(image,q) == 0)
{
q+=GetPixelChannels(image);
continue;
}
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel=GetPixelChannelChannel(image,i);
PixelTrait traits=GetPixelChannelTraits(image,channel);
if ((traits & UpdatePixelTrait) == 0)
continue;
q[i]=ClampToQuantum(QuantumScale*((double) q[i]*AccentuateFactor+
(double) foreground*(QuantumRange-AccentuateFactor)));
}
q+=GetPixelChannels(image);
}
for ( ; x < (ssize_t) image->columns; x++)
{
if (GetPixelReadMask(image,q) == 0)
{
q+=GetPixelChannels(image);
continue;
}
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel=GetPixelChannelChannel(image,i);
PixelTrait traits=GetPixelChannelTraits(image,channel);
if ((traits & UpdatePixelTrait) == 0)
continue;
q[i]=ClampToQuantum(QuantumScale*((double) q[i]*ShadowFactor+(double)
background*(QuantumRange-ShadowFactor)));
}
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_RaiseImage)
#endif
proceed=SetImageProgress(image,RaiseImageTag,progress++,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(progress,status) \
magick_threads(image,image,1,1)
#endif
for (y=(ssize_t) raise_info->height; y < (ssize_t) (image->rows-raise_info->height); y++)
{
register ssize_t
i,
x;
register Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) raise_info->width; x++)
{
if (GetPixelReadMask(image,q) == 0)
{
q+=GetPixelChannels(image);
continue;
}
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel=GetPixelChannelChannel(image,i);
PixelTrait traits=GetPixelChannelTraits(image,channel);
if ((traits & UpdatePixelTrait) == 0)
continue;
q[i]=ClampToQuantum(QuantumScale*((double) q[i]*HighlightFactor+(double)
foreground*(QuantumRange-HighlightFactor)));
}
q+=GetPixelChannels(image);
}
for ( ; x < (ssize_t) (image->columns-raise_info->width); x++)
q+=GetPixelChannels(image);
for ( ; x < (ssize_t) image->columns; x++)
{
if (GetPixelReadMask(image,q) == 0)
{
q+=GetPixelChannels(image);
continue;
}
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel=GetPixelChannelChannel(image,i);
PixelTrait traits=GetPixelChannelTraits(image,channel);
if ((traits & UpdatePixelTrait) == 0)
continue;
q[i]=ClampToQuantum(QuantumScale*((double) q[i]*ShadowFactor+(double)
background*(QuantumRange-ShadowFactor)));
}
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_RaiseImage)
#endif
proceed=SetImageProgress(image,RaiseImageTag,progress++,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(progress,status) \
magick_threads(image,image,1,1)
#endif
for (y=(ssize_t) (image->rows-raise_info->height); y < (ssize_t) image->rows; y++)
{
register ssize_t
i,
x;
register Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) (image->rows-y); x++)
{
if (GetPixelReadMask(image,q) == 0)
{
q+=GetPixelChannels(image);
continue;
}
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel=GetPixelChannelChannel(image,i);
PixelTrait traits=GetPixelChannelTraits(image,channel);
if ((traits & UpdatePixelTrait) == 0)
continue;
q[i]=ClampToQuantum(QuantumScale*((double) q[i]*HighlightFactor+(double)
foreground*(QuantumRange-HighlightFactor)));
}
q+=GetPixelChannels(image);
}
for ( ; x < (ssize_t) (image->columns-(image->rows-y)); x++)
{
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel=GetPixelChannelChannel(image,i);
PixelTrait traits=GetPixelChannelTraits(image,channel);
if ((traits & UpdatePixelTrait) == 0)
continue;
q[i]=ClampToQuantum(QuantumScale*((double) q[i]*TroughFactor+
(double) background*(QuantumRange-TroughFactor)));
}
q+=GetPixelChannels(image);
}
for ( ; x < (ssize_t) image->columns; x++)
{
if (GetPixelReadMask(image,q) == 0)
{
q+=GetPixelChannels(image);
continue;
}
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel=GetPixelChannelChannel(image,i);
PixelTrait traits=GetPixelChannelTraits(image,channel);
if ((traits & UpdatePixelTrait) == 0)
continue;
q[i]=ClampToQuantum(QuantumScale*((double) q[i]*ShadowFactor+(double)
background*(QuantumRange-ShadowFactor)));
}
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_RaiseImage)
#endif
proceed=SetImageProgress(image,RaiseImageTag,progress++,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
return(status);
}
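/*
  Usage sketch (illustrative, not part of MagickCore): apply a 10x10
  raised-button bevel to an image already loaded into "image".

      RectangleInfo raise_info = { 10, 10, 0, 0 };  // width, height, x, y
      if (RaiseImage(image, &raise_info, MagickTrue, exception) == MagickFalse)
        { handle the error reported in "exception" }
*/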
|
merge_sort_openmp.h | // sorts [xs,xe). zs[0:xe-xs) is temporary buffer supplied by caller.
// result is in [xs,xe) if inplace==true, otherwise in zs[0:xe-xs).
void parallel_merge_sort( T* xs, T* xe, T* zs, bool inplace ) {
const size_t SORT_CUT_OFF = 500;
if( xe-xs<=SORT_CUT_OFF ) {
std::stable_sort( xs, xe );
if( !inplace )
std::move( xs, xe, zs );
} else {
T* xm = xs + (xe-xs)/2;
T* zm = zs + (xm-xs);
T* ze = zs + (xe-xs);
#pragma omp task
parallel_merge_sort( xs, xm, zs, !inplace );
parallel_merge_sort( xm, xe, zm, !inplace );
#pragma omp taskwait
if( inplace )
parallel_merge( zs, zm, zm, ze, xs );
else
parallel_merge( xs, xm, xm, xe, zs );
}
}
// OpenMP tasks do not run in parallel unless launched inside a thread team.
// This outer wrapper shows how to create the thread team and run the top-level call.
void do_parallel_merge_sort( T* xs, T* xe, T* zs, bool inplace ) {
// Create a thread team.
#pragma omp parallel
// Make only one thread do the top-level call.
// Other threads in team pick up spawned tasks.
#pragma omp single
{
    parallel_merge_sort( xs, xe, zs, inplace );
}
}
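// Usage sketch (illustrative, not part of the original header): sort n
// elements of the element type T in place, with a caller-supplied scratch
// buffer of equal length. Assumes T is defined before this header is
// included, as the routines above already require.
#include <cstddef>
#include <vector>
inline void do_parallel_merge_sort_example( T* first, std::size_t n ) {
    std::vector<T> scratch(n);   // temporary merge buffer, same length as input
    do_parallel_merge_sort( first, first + n, scratch.data(), /*inplace=*/true );
}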
|
convolution.c | //--------------------------------------------------------------------------//
//
// convolution.c
//
// Created by Josep Lluis Lerida on 11/03/2015
// Modified by Didac Semente Fernandez on 04/04/2016
//
// This program calculates the convolution for PPM images.
// The program accepts a PPM image file, a text definition of the kernel
// matrix and the PPM file for storing the convolution results.
// The program allows defining image partitions for processing larger
// images (>500MB).
// The 2D image is represented by a 1D vector for each channel R, G and B.
// The convolution is applied to each channel separately.
//
//--------------------------------------------------------------------------//
//--------------------------------------------------------------------------//
// -- EXTERNAL LIBRARIES -------------------------------------------------- //
//--------------------------------------------------------------------------//
#include <ctype.h>
#include <mpi.h>
#include <omp.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
#include <unistd.h>
//--------------------------------------------------------------------------//
#include "lib/convolution.h"
//--------------------------------------------------------------------------//
// -- MACRO DEFINITION -----------------------------------------------------//
//--------------------------------------------------------------------------//
#define F_MICROS_IN_SECOND 1000000.0
#define TRUE 1
#define FALSE 0
#define REALLOC_MARGIN 10
#define INCREASE_FACTOR 100
//--------------------------------------------------------------------------//
// -- AUXILIARY METHODS ----------------------------------------------------//
//--------------------------------------------------------------------------//
int validateParameters(char**);
double calculateExtraSize(int partitions);
long rebuildImage(ImageData, DataBucket*);
long calcRasterWriteAmount(int*, long, long);
long calculateWriteAmount(OutputBucket, ImageData, int, int, int);
int gPrank;
//--------------------------------------------------------------------------//
// -- LIBRARY IMPLEMENTATION ---------------------------------------------- //
//--------------------------------------------------------------------------//
// Read the corresponding chunk from the source Image
int readChunk(MPI_File* mfp, intmax_t *offset, intmax_t *limit,
DataBucket bucket) {
intmax_t pos = *offset;
int value = 0, mult = 10;
int newValue = FALSE;
int increase = INCREASE_FACTOR;
long k = bucket->offset, bucketBlockSize, i = 0;
char c;
char *cbuff = NULL;
MPI_Status status;
int **temp = NULL;
temp = (int**) malloc(sizeof(int*)); // Avoid breaking strict aliasing
cbuff = (char*) malloc(sizeof(char) * (*limit - *offset + 1));
MPI_File_set_view(*mfp, *offset, MPI_CHAR, MPI_CHAR,
"native", MPI_INFO_NULL);
MPI_File_read(*mfp, &cbuff[0], (*limit - *offset + 1), MPI_CHAR, &status);
while(pos <= *limit) {
c = cbuff[i];
if(isdigit(c)) {
value = (value * mult) + (c - '0');
newValue = TRUE;
} else if(newValue) {
bucket->data[k] = value;
value = 0;
newValue = FALSE;
k++;
// CHECKING IF WE ARE ABOUT TO FILL THE BUCKET
*temp = bucket->data;
bucketBlockSize = bucket->blckSize;
bucket->blckSize = checkForRealloc((void**) temp,
bucket->blckSize, (k + REALLOC_MARGIN),
sizeof(bucket->data[0]), increase);
bucket->data = *temp;
if(bucketBlockSize < bucket->blckSize) {
increase *= 2;
} else if(bucket->blckSize == -1) {
perror("Error: ");
return -1;
}
}
pos += 1;
i += 1;
}
bucket->bsize = k;
free(temp);
free(cbuff);
return 0;
}
// Duplicates the chunk just read from the source image
// into the destination image struct chunk
void* duplicateImageChunk(ImageData src, ImageData dst) {
int** temp = NULL;
long blckInc = (src->blckSize - dst->blckSize);
    temp = (int**) malloc(sizeof(int*)); // Avoid breaking strict aliasing
*temp = dst->R;
checkForRealloc((void**) temp, dst->blckSize, src->blckSize,
sizeof(dst->R[0]), blckInc);
dst->R = *temp;
*temp = dst->G;
checkForRealloc((void**) temp, dst->blckSize, src->blckSize,
sizeof(dst->G[0]), blckInc);
dst->G = *temp;
*temp = dst->B;
dst->blckSize = checkForRealloc((void**) temp, dst->blckSize,
src->blckSize, sizeof(dst->B[0]), blckInc);
dst->B = *temp;
*temp = NULL;
free(temp);
if(dst->blckSize == -1) {
return NULL;
}
dst->rsize = src->rsize;
dst->bsize = src->bsize;
dst->gsize = src->gsize;
if(memcpy((void*) dst->R, (void*) src->R,
dst->rsize * sizeof(dst->R[0])) == NULL) {
return NULL;
}
if(memcpy((void*) dst->G, (void*) src->G,
dst->gsize * sizeof(dst->G[0])) == NULL) {
return NULL;
}
return memcpy((void*) dst->B, (void*) src->B,
dst->bsize * sizeof(dst->B[0]));
}
// Opens the kernel file and reads the kernel matrix.
// The 2D kernel matrix is stored in 1D format.
KernelData readKernel(char* fileName) {
FILE *fp;
int ksize = 0, tempvalue = 0;
KernelData kern = NULL;
// Opening the kernel file
if((fp = openFile(fileName, "r")) == NULL) {
perror("Error: ");
} else {
// Memory allocation
kern = (KernelData) malloc(sizeof(struct structkernel));
// Reading kernel matrix dimensions
fscanf(fp, "%d,%d,", &kern->kernelX, &kern->kernelY);
ksize = (kern->kernelX * kern->kernelY);
kern->vkern = (float*) malloc(ksize * sizeof(float));
// Reading kernel matrix values
for(int i = 0; i < ksize; i++) {
fscanf(fp, "%d,", &tempvalue);
kern->vkern[i] = (float) tempvalue;
}
fclose(fp);
}
return kern;
}
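// Illustrative example (not from the original sources) of the comma-
// separated kernel file format parsed above: the dimensions "X,Y," come
// first, followed by X*Y integer values in row-major order. A 3x3
// sharpen kernel file would contain:
//
//     3,3,0,-1,0,-1,5,-1,0,-1,0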
// Open the image file with the convolution results
int initfilestore(ImageData img, FILE** fp, char* fileName, long *position) {
// File with the resulting image is created
if((*fp = openFile(fileName, "w")) == NULL) {
perror("Error: ");
return -1;
}
// Writing image header
fprintf(*fp, "P%d\n%s\n%d %d\n%d\n", img->P, img->comment, img->width,
img->height, img->maxcolor);
*position = ftell(*fp);
return 0;
}
// Writing the image chunk to the resulting file.
int savingChunk(OutputBucket out, MPI_File *mfp, long *offset) {
MPI_Status status;
MPI_File_set_view(*mfp, *offset, MPI_CHAR, MPI_CHAR, "native",
MPI_INFO_NULL);
for(int i = 0; i < out->lineCount; i++) {
MPI_File_write(*mfp, (void*) &out->lines[i][0], out->lineSizes[i],
MPI_CHAR, &status);
}
return 0;
}
// This function frees the space allocated for the image structure.
void freeImagestructure(ImageData *src) {
free((*src)->comment);
free((*src)->R);
free((*src)->G);
free((*src)->B);
free(*src);
}
//--------------------------------------------------------------------------//
// 2D convolution
// 2D data are usually stored in computer memory as a contiguous 1D array,
// so we use a 1D array for the 2D data.
// 2D convolution assumes the kernel is center-originated, which means, if
// the kernel size is 3, the indices are k[-1], k[0], k[1]: the middle index
// is always 0.
// The following logic is somewhat complicated because it uses pointer
// indexing in order to minimize the number of multiplications.
//
//
// signed integer (32bit) version:
//--------------------------------------------------------------------------//
int convolve2D(int* in, int* out, int dataSizeX, int dataSizeY, int dataOff,
float* kernel, int kernelSizeX, int kernelSizeY) {
int *inPtr = NULL, *inPtr2 = NULL, *outPtr = NULL;
float *kPtr = NULL;
int kCenterX, kCenterY;
long rowMin, rowMax; // to check boundary of input array
long colMin, colMax; //
float sum; // temp accumulation buffer
// Parameter validatin
if(!in || !out || !kernel || dataSizeX <= 0 || kernelSizeX <= 0) {
return -1;
}
    // Find central position of kernel (half of kernel size)
kCenterX = (int) kernelSizeX / 2;
kCenterY = (int) kernelSizeY / 2;
// init working pointers
// note that it is shifted (kCenterX, kCenterY),
inPtr = inPtr2 = &in[(dataSizeX * kCenterY) + kCenterX];
outPtr = out;
kPtr = kernel;
// start convolution
// number of rows
for(register int i = 0; i < dataSizeY; ++i) {
// compute the range of convolution, the current row of kernel
// should be between these
rowMax = i + kCenterY;
rowMin = i - dataSizeY + kCenterY;
// number of columns
for(register int j = 0; j < dataSizeX; ++j) {
// compute the range of convolution, the current column of kernel
// should be between these
colMax = j + kCenterX;
colMin = j - dataSizeX + kCenterX;
sum = 0.0f; // set to 0 before accumulate
// flip the kernel and traverse all the kernel values
// multiply each kernel value with underlying input data
// kernel rows
for(register int m = 0; m < kernelSizeY; ++m) {
// check if the index is out of bound of input array
if(m <= rowMax && m > rowMin) {
for(register int n = 0; n < kernelSizeX; ++n) {
// check the boundary of array
if(n <= colMax && n > colMin) {
sum += *(inPtr - n) * (*kPtr);
}
++kPtr;// next kernel
}
} else {
// out of bound, move to next row of kernel
kPtr += kernelSizeX;
}
            // move input data 1 row up
inPtr -= dataSizeX;
}
        // convert to integer, rounding half away from zero
if(sum >= 0.0f) {
*outPtr = (int)(sum + 0.5f);
} else { // For using with image editors like GIMP or others...
*outPtr = (int)(sum - 0.5f);
}
kPtr = kernel; // reset kernel to (0,0)
inPtr = ++inPtr2; // next input
++outPtr; // next output
}
}
return 0;
}
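// Illustrative sketch (not part of the original program): smoothing a tiny
// 4x4 single-channel raster with a 3x3 box kernel via convolve2D(). All
// names are local to this example.
static int convolve2D_example(void) {
    int in[16], out[16];
    float box[9] = { 1/9.0f, 1/9.0f, 1/9.0f,
                     1/9.0f, 1/9.0f, 1/9.0f,
                     1/9.0f, 1/9.0f, 1/9.0f };
    for(int i = 0; i < 16; i++) {
        in[i] = i;                       // arbitrary test data
    }
    return convolve2D(in, out, 4, 4, 0, box, 3, 3);
}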
//--------------------------------------------------------------------------//
// -- AUXILIARY METHODS IMPLEMENTATION ------------------------------------ //
//--------------------------------------------------------------------------//
int validateParameters(char **args) {
if(access(args[1], F_OK)) {
perror("Input image error");
return -1;
} else if(access(args[2], F_OK)) {
perror("Kernel file error");
return -1;
} else if(atoi(args[4]) < 1) {
printf("Partition number error: value less than 1\n");
return -1;
}
return 0;
}
double calculateExtraSize(int partitions) {
double x = (double) partitions;
return (x / (15 + 3*x)) - 0.058f;
}
// Method used to fill the ImageData structure using the data found in the
// DataBucket list.
long rebuildImage(ImageData img, DataBucket *bucks) {
long r, g, b, tsize;
long rasterR, rasterG, rasterB;
long increaseR, increaseG, increaseB;
long memR, memG, memB;
int flip, **temp = NULL;
r = g = b = 0L;
flip = 0;
increaseR = increaseG = increaseB = INCREASE_FACTOR * 10;
memR = memG = memB = 0L;
temp = (int**) malloc(sizeof(int*)); // Avoid breaking strict aliasing
for(int i = 0; i < 1; i++) {
for(int j = 0; j < bucks[i]->bsize; j++) {
switch(flip) {
case 0:
img->R[r] = bucks[i]->data[j];
r++;
rasterR = img->blckSize;
*temp = img->R;
memR = checkForRealloc((void**) temp, img->blckSize,
r + REALLOC_MARGIN, sizeof(int), increaseR);
img->R = *temp;
if(rasterR < memR) {
increaseR *= 2;
}
break;
case 1:
img->G[g] = bucks[i]->data[j];
g++;
rasterG = img->blckSize;
*temp = img->G;
memG = checkForRealloc((void**) temp, img->blckSize,
g + REALLOC_MARGIN, sizeof(int), increaseG);
img->G = *temp;
if(rasterG < memG) {
increaseG *= 2;
}
break;
case 2:
img->B[b] = bucks[i]->data[j];
b++;
rasterB = img->blckSize;
*temp = img->B;
memB = checkForRealloc((void**) temp, img->blckSize,
b + REALLOC_MARGIN, sizeof(int), increaseB);
img->B = *temp;
if(rasterB < memB) {
increaseB *= 2;
img->blckSize = memB;
}
break;
}
*temp = NULL;
flip = (flip + 1) % 3;
}
bucks[i]->offset = 0;
}
*temp = NULL;
free(temp);
tsize = (r + g + b);
// Check for unaligned rasters
// Either 1 Blue is missing from the image or
// both 1 Green and 1 Blue.
switch(tsize % 3) {
case 0:
break;
case 2:
bucks[0]->offset += 1;
            tsize -= 1;
            /* fall through */
        case 1:
bucks[0]->offset += 1;
tsize -= 1;
break;
}
img->rsize = img->bsize = img->gsize = (tsize / 3);
return (tsize / 3);
}
int countDigits(int num) {
int n = 0;
while(num) {
num /= 10;
n++;
}
return n;
}
long calculateWriteAmount(OutputBucket outBuck, ImageData img, int offset,
int chunksize, int imgWidth) {
int chunkSplits[3];
long i = 0L, k = 0L, end = 0L, baseSize = 0L;
int increase = 0, digits = countDigits(img->maxcolor),
split, mod, threadId;
long writeAmount = 0L, *writeAmounts = NULL;
writeAmounts = (long*) calloc(3, sizeof(long));
mod = chunksize % 3;
split = (chunksize-mod) / 3;
chunkSplits[0] = chunkSplits[1] = chunkSplits[2] = split;
if(mod == 1) {
chunkSplits[2] += 1;
} else if(mod == 2) {
chunkSplits[1] += 1;
chunkSplits[2] += 1;
}
baseSize = (sizeof(char) * (digits+2) * 3 * imgWidth);
if(outBuck->lines == NULL) {
outBuck->lines = (char**) malloc(sizeof(char*) * 3);
for(int t = 0; t < 3; t++) {
outBuck->lines[t] = (char*) malloc((baseSize * chunkSplits[t]));
}
}
#pragma omp parallel private(threadId, i, end, k, increase)
{
threadId = omp_get_thread_num();
i = offset * imgWidth;
for(int t = 0; t < threadId; t++) {
i += (chunkSplits[t] * imgWidth);
}
end = i + (chunkSplits[threadId] * imgWidth);
k = 0L;
while(i < end) {
increase = sprintf(&(outBuck->lines[threadId])[k], "%d %d %d\n",
img->R[i], img->G[i], img->B[i]);
k += increase;
writeAmounts[threadId] += increase;
i++;
}
}
for(int t = 0; t < outBuck->lineCount; t++) {
writeAmount += writeAmounts[t];
}
outBuck->lineSizes = writeAmounts;
writeAmounts = NULL;
return writeAmount;
}
//--------------------------------------------------------------------------//
// - MAIN METHOD -----------------------------------------------------------//
//--------------------------------------------------------------------------//
int main(int argc, char **argv) {
int c, offset, pc;
int prank, pnum;
int partitions, effectivePart, halo, haloSize;
int imgWidth, imgHeight;
int convOffset, convSize;
long *writeOffs = NULL;
long totalWritten = 0L, writeSize = 0L;
long bposition, position, chunkSize, iterSize, bucketSize;
double start, tstart, tend, tread, tcopy, tconv, tstore, treadk;
float extraSizeFactor;
char *sourceFile, *outFile, *kernFile;
char cwd[1024];
FILE *fpsrc, *fpdst;
MPI_File *mfpsrc, *mfpdst;
ImageData source, output;
KernelData kern;
ImageChunk *chunkLst;
DataBucket *buckets;
OutputBucket outBuck;
c = offset = 0;
position = 0L;
tstart = tend = tread = tcopy = tconv = tstore = treadk = 0.0;
sourceFile = outFile = kernFile = NULL;
fpsrc = fpdst = NULL;
mfpsrc = mfpdst = NULL;
source = output = NULL;
kern = NULL;
outBuck = NULL;
extraSizeFactor = 1.0f;
if(argc != 5) {
printf("Usage: %s <image-file> <kernel-file> <result-file> "
"<partitions> \n\n", argv[0]);
printf("- image_file : source image path (*.ppm)\n");
printf("- kernel_file: kernel path (text file with 1D "
"kernel matrix)\n");
printf("- result_file: result image path (*.ppm)\n");
printf("- partitions : Image partitions\n");
return -1;
}
if(validateParameters(argv) == -1) {
return -1;
}
omp_set_dynamic(FALSE);
omp_set_num_threads(3);
MPI_Init(&argc, &argv);
start = MPI_Wtime();
tstart = start;
MPI_Comm_size(MPI_COMM_WORLD, &pnum);
MPI_Comm_rank(MPI_COMM_WORLD, &prank);
gPrank = prank;
if(gPrank == 0) {
printf("PROC COUNT: %d\n", pnum);
}
//Storing parameters
sourceFile = argv[1];
kernFile = argv[2];
outFile = argv[3];
partitions = atoi(argv[4]);
effectivePart = partitions * pnum;
writeOffs = (long*) malloc(sizeof(long) * pnum);
outBuck = (OutputBucket) malloc(sizeof(struct outbucket));
outBuck->lineCount = 3;
outBuck->lines = NULL;
outBuck->lineSizes = NULL;
getcwd(cwd, sizeof(cwd));
// Opening files
mfpsrc = (MPI_File*) malloc(sizeof(MPI_File));
mfpdst = (MPI_File*) malloc(sizeof(MPI_File));
openMPIFile(mfpsrc, sourceFile, MPI_MODE_RDONLY);
openMPIFile(mfpdst, outFile, MPI_MODE_WRONLY | MPI_MODE_CREATE);
// READING IMAGE HEADERS, KERNEL Matrix, DUPLICATE IMAGE DATA,
// OPEN RESULTING IMAGE FILE
// Reading kernel matrix
start = MPI_Wtime();
if ((kern = readKernel(kernFile)) == NULL) {
return -1;
}
// The matrix kernel defines the halo size to use with the image.
// The halo is zero when the image is not partitioned.
if (effectivePart == 1) {
halo = 0;
} else {
halo = kern->kernelY;
}
treadk = MPI_Wtime() - start;
// Reading Image Header. Image properties: Magical number, comment,
// size and color resolution.
start = MPI_Wtime();
// Calculating extra size for memory assignment in order to avoid
// calling realloc further in the execution
extraSizeFactor = extraSizeFactor + calculateExtraSize(effectivePart);
// Memory allocation based on number of partitions and halo size.
if((source = parseFileHeader(sourceFile, &fpsrc, effectivePart,
halo, extraSizeFactor)) == NULL) {
return -1;
}
imgWidth = source->width;
imgHeight = source->height;
bposition = source->headersize;
totalWritten = bposition;
tread = tread + (MPI_Wtime() - start);
// Duplicate the image struct.
start = MPI_Wtime();
if ((output = duplicateImageData(source, effectivePart, halo,
extraSizeFactor)) == NULL) {
return -1;
}
tcopy = tcopy + (MPI_Wtime() - start);
// Initialize Image output file. Open the file and store the image header
start = MPI_Wtime();
if(prank == 0) {
if (initfilestore(output, &fpdst, outFile, &position) != 0) {
perror("Error: ");
return -1;
}
fclose(fpdst);
}
tstore = tstore + (MPI_Wtime() - start);
bucketSize = (imgWidth * imgHeight * 3) / effectivePart;
bucketSize = bucketSize + (imgWidth * halo);
bucketSize = (long) ((float) bucketSize * extraSizeFactor);
chunkLst = calculateChunkSections(&fpsrc, source, effectivePart);
fclose(fpsrc);
if ((buckets = initializeBuckets(1, bucketSize)) == NULL) {
perror("Error: ");
return -1;
}
//----------------------------------------------------------------------//
// CHUNK PROCESSING LOOP
//----------------------------------------------------------------------//
while (c < partitions) {
pc = (pnum * c) + prank;
// Reading chunk.
start = MPI_Wtime();
if (readChunk(mfpsrc, &(chunkLst[pc]->start),
&(chunkLst[pc]->end), buckets[0])) {
return -1;
}
if(pnum > 1) {
transferUnalignedRasters(prank, pnum, buckets[0], imgWidth);
}
haloSize = (halo / 2);
if(pnum > 1) {
transferBorders(pc, partitions, prank, pnum, buckets[0],
imgWidth, haloSize);
}
// Copying data from the DataBucket into the ImageData arrays
iterSize = rebuildImage(source, buckets);
tread = tread + (MPI_Wtime() - start);
// Discarding incomplete row.
convOffset = (iterSize % imgWidth);
convSize = iterSize - convOffset;
        // The number of rows to convolve needs to be larger than the kernel
        // size; otherwise there will be problems with pixel alignment.
if(pc < (effectivePart-1)) {
chunkSize = (convSize / imgWidth) - haloSize;
if(pc == 0) {
offset = 0;
} else {
offset = haloSize;
}
} else {
chunkSize = (convSize / imgWidth);
offset = haloSize;
}
// Duplicate the image chunk
start = MPI_Wtime();
if (duplicateImageChunk(source, output) == NULL) {
perror("Error: ");
return -1;
}
tcopy = tcopy + (MPI_Wtime() - start);
//------------------------------------------------------------------//
// - CHUNK CONVOLUTION ---------------------------------------------//
//------------------------------------------------------------------//
start = MPI_Wtime();
#pragma omp parallel
{
#pragma omp sections
{
#pragma omp section
convolve2D(source->R, output->R, imgWidth, chunkSize,
offset, kern->vkern, kern->kernelX, kern->kernelY);
#pragma omp section
convolve2D(source->G, output->G, imgWidth, chunkSize,
offset, kern->vkern, kern->kernelX, kern->kernelY);
#pragma omp section
convolve2D(source->B, output->B, imgWidth, chunkSize,
offset, kern->vkern, kern->kernelX, kern->kernelY);
}
}
tconv = MPI_Wtime() - start;
//------------------------------------------------------------------//
// - CHUNK SAVING --------------------------------------------------//
//------------------------------------------------------------------//
start = MPI_Wtime();
if(pc > 0) {
offset = haloSize;
if(pc < (effectivePart - 1)) {
chunkSize = (convSize / imgWidth) - (haloSize * 2);
} else {
chunkSize = (convSize / imgWidth) - haloSize;
}
} else {
offset = 0;
chunkSize = (convSize / imgWidth) - haloSize;
}
writeSize = calculateWriteAmount(outBuck, output, offset, chunkSize,
imgWidth);
MPI_Allgather((void*) &writeSize, 1, MPI_LONG, (void*) &writeOffs[0],
1, MPI_LONG, MPI_COMM_WORLD);
position = totalWritten;
for(int i = 0; i < pnum; i++) {
if(i < prank) {
position = position + writeOffs[i];
}
totalWritten = totalWritten + writeOffs[i];
}
if (savingChunk(outBuck, mfpdst, &position)) {
perror("Error: ");
return -1;
}
tstore = tstore + (MPI_Wtime() - start);
// Moving previously discarded pixels to the beginning of the bucket
// for the next iteration
if(c < partitions-1) {
if(pnum > 1) {
adjustBucketContents(buckets, prank, pnum, imgWidth,
haloSize);
} else {
adjustProcessBucket(buckets, imgWidth, haloSize);
}
}
c++;
}
MPI_File_close(mfpsrc);
MPI_File_close(mfpdst);
tend = MPI_Wtime();
if(prank == 0) {
printf("-----------------------------------\n");
printf("| TYPE SIZES (BYTES) |\n");
printf("-----------------------------------\n");
printf("Size of short: ----> %ld\n", sizeof(short));
printf("Size of int: ------> %ld\n", sizeof(int));
printf("Size of long: -----> %ld\n", sizeof(long));
printf("Size of intmax_t: -> %ld\n", sizeof(intmax_t));
printf("Size of size_t: ---> %ld\n", sizeof(size_t));
printf("Size of float: ----> %ld\n", sizeof(float));
printf("Size of double: ---> %ld\n", sizeof(double));
printf("-----------------------------------\n");
printf("| IMAGE INFO |\n");
printf("-----------------------------------\n");
printf("Working directory: %s\n", cwd);
printf("File path: %s\n", sourceFile);
printf("File output: %s\n", outFile);
printf("Header size (bytes): %ld\n", source->headersize);
printf("Raster size (bytes): %jd\n", source->rastersize);
printf("ISizeX : %d\n", imgWidth);
printf("ISizeY : %d\n", imgHeight);
printf("kSizeX : %d\n", kern->kernelX);
printf("kSizeY : %d\n", kern->kernelY);
printf("-----------------------------------\n");
printf("| EXECUTION TIMES |\n");
printf("-----------------------------------\n");
printf("%.6lfs elapsed in reading image file.\n", tread);
printf("%.6lfs elapsed in copying image structure.\n", tcopy);
printf("%.6lfs elapsed in reading kernel matrix.\n", treadk);
printf("%.6lfs elapsed computing the convolution.\n", tconv);
printf("%.6lfs elapsed in writing the resulting image.\n", tstore);
printf("-----------------------------------\n");
printf("%.6lfs elapsed in total.\n", tend-tstart);
printf("-----------------------------------\n");
//printf("%s %s %d %.3lf\n", sourceFile, kernFile, pnum, tend-tstart);
}
//----------------------------------------------------------------------//
// - MEMORY CLEANING --------------------------------------------------//
//----------------------------------------------------------------------//
freeImagestructure(&source);
freeImagestructure(&output);
freeDataBuckets(buckets, 1);
freeChunkList(chunkLst, effectivePart);
free(kern->vkern);
free(kern);
free(mfpsrc);
free(mfpdst);
free(writeOffs);
//----------------------------------------------------------------------//
MPI_Barrier(MPI_COMM_WORLD);
MPI_Finalize();
return 0;
}
//--------------------------------------------------------------------------//
|
GxB_Vector_iso.c | //------------------------------------------------------------------------------
// GxB_Vector_iso: report if a vector is iso-valued or not
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
#include "GB.h"
GrB_Info GxB_Vector_iso // return iso status of a vector
(
bool *iso, // true if the vector is iso-valued
const GrB_Vector v // vector to query
)
{
//--------------------------------------------------------------------------
// check inputs
//--------------------------------------------------------------------------
GB_WHERE1 ("GxB_Vector_iso (&iso, v)") ;
GB_RETURN_IF_NULL (iso) ;
GB_RETURN_IF_NULL_OR_FAULTY (v) ;
ASSERT (GB_VECTOR_OK (v)) ;
//--------------------------------------------------------------------------
// return the iso status of a vector
//--------------------------------------------------------------------------
(*iso) = v->iso ;
#pragma omp flush
return (GrB_SUCCESS) ;
}
|
3d7pt.c | /*
* Order-1, 3D 7 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
/* Perform the carry for the later subtraction by updating y. */
if (x->tv_usec < y->tv_usec)
{
int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
y->tv_usec -= 1000000 * nsec;
y->tv_sec += nsec;
}
if (x->tv_usec - y->tv_usec > 1000000)
{
int nsec = (x->tv_usec - y->tv_usec) / 1000000;
y->tv_usec += 1000000 * nsec;
y->tv_sec -= nsec;
}
/* Compute the time remaining to wait.
* tv_usec is certainly positive.
*/
result->tv_sec = x->tv_sec - y->tv_sec;
result->tv_usec = x->tv_usec - y->tv_usec;
/* Return 1 if result is negative. */
return x->tv_sec < y->tv_sec;
}
int main(int argc, char *argv[])
{
int t, i, j, k, test;
// Assumed fallback sizes (hypothetical defaults) so Nx..Nt are always
// defined when the command line omits the grid or time dimensions.
int Nx = 34, Ny = 34, Nz = 34, Nt = 10;
if (argc > 3) {
Nx = atoi(argv[1])+2;
Ny = atoi(argv[2])+2;
Nz = atoi(argv[3])+2;
}
if (argc > 4)
Nt = atoi(argv[4]);
double ****A = (double ****) malloc(sizeof(double***)*2);
A[0] = (double ***) malloc(sizeof(double**)*Nz);
A[1] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[0][i] = (double**) malloc(sizeof(double*)*Ny);
A[1][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 16;
tile_size[1] = 16;
tile_size[2] = 32;
tile_size[3] = 64;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
const double alpha = 0.0876;
const double beta = 0.0765;
// initialize variables
//
srand(42);
for (i = 1; i < Nz; i++) {
for (j = 1; j < Ny; j++) {
for (k = 1; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
#pragma scop
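// Two time planes alternate: plane (t+1)%2 is written while plane t%2 is
// read, so each sweep consumes only the previous time step (Jacobi-style).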
for (t = 0; t < Nt-1; t++) {
for (i = 1; i < Nz-1; i++) {
for (j = 1; j < Ny-1; j++) {
for (k = 1; k < Nx-1; k++) {
A[(t+1)%2][i][j][k] = alpha * (A[t%2][i][j][k])
+ beta * (A[t%2][i - 1][j][k] + A[t%2][i][j - 1][k] + A[t%2][i][j][k - 1] +
A[t%2][i + 1][j][k] + A[t%2][i][j + 1][k] + A[t%2][i][j][k + 1]);
}
}
}
}
#pragma endscop
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = MIN(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(1, "constant")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays (commented out: freeing was causing performance degradation)
/* for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
}
free(A[0][i]);
free(A[1][i]);
}
free(A[0]);
free(A[1]);
*/
return 0;
}
|
par_mod_lr_interp.c | /******************************************************************************
* Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
* HYPRE Project Developers. See the top-level COPYRIGHT file for details.
*
* SPDX-License-Identifier: (Apache-2.0 OR MIT)
******************************************************************************/
#include "_hypre_parcsr_ls.h"
#include "aux_interp.h"
/*---------------------------------------------------------------------------
* hypre_BoomerAMGBuildModExtInterp
* Comment:
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGBuildModExtInterp(hypre_ParCSRMatrix *A, HYPRE_Int *CF_marker,
hypre_ParCSRMatrix *S, HYPRE_BigInt *num_cpts_global,
HYPRE_Int debug_flag,
HYPRE_Real trunc_factor, HYPRE_Int max_elmts,
HYPRE_Int *col_offd_S_to_A,
hypre_ParCSRMatrix **P_ptr)
{
/* Communication Variables */
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
HYPRE_MemoryLocation memory_location_P = hypre_ParCSRMatrixMemoryLocation(A);
HYPRE_Int my_id, num_procs;
/* Variables to store input variables */
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag);
HYPRE_BigInt total_global_cpts;
/* Interpolation matrix P */
hypre_ParCSRMatrix *P;
hypre_CSRMatrix *P_diag;
hypre_CSRMatrix *P_offd;
HYPRE_Real *P_diag_data = NULL;
HYPRE_Int *P_diag_i, *P_diag_j = NULL;
HYPRE_Real *P_offd_data = NULL;
HYPRE_Int *P_offd_i, *P_offd_j = NULL;
/* Intermediate matrices */
hypre_ParCSRMatrix *As_FF, *As_FC, *W;
HYPRE_Real *D_q, *D_w;
hypre_CSRMatrix *As_FF_diag;
hypre_CSRMatrix *As_FF_offd;
hypre_CSRMatrix *As_FC_diag;
hypre_CSRMatrix *As_FC_offd;
hypre_CSRMatrix *W_diag;
hypre_CSRMatrix *W_offd;
HYPRE_Int *As_FF_diag_i;
HYPRE_Int *As_FF_offd_i;
HYPRE_Int *As_FC_diag_i;
HYPRE_Int *As_FC_offd_i;
HYPRE_Int *W_diag_i;
HYPRE_Int *W_offd_i;
HYPRE_Int *W_diag_j;
HYPRE_Int *W_offd_j;
HYPRE_Real *As_FF_diag_data;
HYPRE_Real *As_FF_offd_data;
HYPRE_Real *As_FC_diag_data;
HYPRE_Real *As_FC_offd_data;
HYPRE_Real *W_diag_data;
HYPRE_Real *W_offd_data;
HYPRE_BigInt *col_map_offd_P = NULL;
HYPRE_BigInt *new_col_map_offd = NULL;
HYPRE_Int P_diag_size;
HYPRE_Int P_offd_size;
HYPRE_Int new_ncols_P_offd;
HYPRE_Int num_cols_P_offd;
HYPRE_Int *P_marker = NULL;
/* Loop variables */
HYPRE_Int index;
HYPRE_Int i, j;
HYPRE_Int *cpt_array;
HYPRE_Int *start_array;
HYPRE_Int *startf_array;
HYPRE_Int start, stop, startf, stopf;
HYPRE_Int cnt_diag, cnt_offd, row, c_pt;
/* Definitions */
//HYPRE_Real wall_time;
HYPRE_Int n_Cpts, n_Fpts;
HYPRE_Int num_threads = hypre_NumThreads();
//if (debug_flag==4) wall_time = time_getWallclockSeconds();
/* BEGIN */
hypre_MPI_Comm_size(comm, &num_procs);
hypre_MPI_Comm_rank(comm,&my_id);
#ifdef HYPRE_NO_GLOBAL_PARTITION
if (my_id == (num_procs -1)) total_global_cpts = num_cpts_global[1];
hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm);
n_Cpts = num_cpts_global[1]-num_cpts_global[0];
#else
total_global_cpts = num_cpts_global[num_procs];
n_Cpts = num_cpts_global[my_id+1]-num_cpts_global[my_id];
#endif
hypre_ParCSRMatrixGenerateFFFC(A, CF_marker, num_cpts_global, S, &As_FC, &As_FF);
As_FC_diag = hypre_ParCSRMatrixDiag(As_FC);
As_FC_diag_i = hypre_CSRMatrixI(As_FC_diag);
As_FC_diag_data = hypre_CSRMatrixData(As_FC_diag);
As_FC_offd = hypre_ParCSRMatrixOffd(As_FC);
As_FC_offd_i = hypre_CSRMatrixI(As_FC_offd);
As_FC_offd_data = hypre_CSRMatrixData(As_FC_offd);
As_FF_diag = hypre_ParCSRMatrixDiag(As_FF);
As_FF_diag_i = hypre_CSRMatrixI(As_FF_diag);
As_FF_diag_data = hypre_CSRMatrixData(As_FF_diag);
As_FF_offd = hypre_ParCSRMatrixOffd(As_FF);
As_FF_offd_i = hypre_CSRMatrixI(As_FF_offd);
As_FF_offd_data = hypre_CSRMatrixData(As_FF_offd);
n_Fpts = hypre_CSRMatrixNumRows(As_FF_diag);
D_q = hypre_CTAlloc(HYPRE_Real, n_Fpts, memory_location_P);
D_w = hypre_CTAlloc(HYPRE_Real, n_Fpts, memory_location_P);
cpt_array = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
start_array = hypre_CTAlloc(HYPRE_Int, num_threads+1, HYPRE_MEMORY_HOST);
startf_array = hypre_CTAlloc(HYPRE_Int, num_threads+1, HYPRE_MEMORY_HOST);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel private(i,j,start,stop,startf,stopf,row)
#endif
{
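/* Each thread takes a contiguous block [start,stop) of fine rows; the
per-thread C-point counts in cpt_array are prefix-summed so every thread
can translate its block into the matching F-point range [startf,stopf). */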
HYPRE_Int my_thread_num = hypre_GetThreadNum();
HYPRE_Real beta, gamma;
start = (n_fine/num_threads)*my_thread_num;
if (my_thread_num == num_threads-1)
{
stop = n_fine;
}
else
{
stop = (n_fine/num_threads)*(my_thread_num+1);
}
start_array[my_thread_num+1] = stop;
for (i=start; i < stop; i++)
{
if (CF_marker[i] > 0)
{
cpt_array[my_thread_num]++;
}
}
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
if (my_thread_num == 0)
{
for (i=1; i < num_threads; i++)
{
cpt_array[i] += cpt_array[i-1];
}
}
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
if (my_thread_num > 0)
startf = start - cpt_array[my_thread_num-1];
else
startf = 0;
if (my_thread_num < num_threads-1)
stopf = stop - cpt_array[my_thread_num];
else
stopf = n_Fpts;
startf_array[my_thread_num+1] = stopf;
/* Create D_q = D_beta */
for (i=startf; i < stopf; i++)
{
for (j=As_FC_diag_i[i]; j < As_FC_diag_i[i+1]; j++)
{
D_q[i] += As_FC_diag_data[j];
}
for (j=As_FC_offd_i[i]; j < As_FC_offd_i[i+1]; j++)
{
D_q[i] += As_FC_offd_data[j];
}
}
/* Create D_w = D_alpha + D_gamma */
row = startf;
for (i=start; i < stop; i++)
{
if (CF_marker[i] < 0)
{
for (j=A_diag_i[i]; j < A_diag_i[i+1]; j++)
{
D_w[row] += A_diag_data[j];
}
for (j=A_offd_i[i]; j < A_offd_i[i+1]; j++)
{
D_w[row] += A_offd_data[j];
}
for (j=As_FF_diag_i[row]+1; j < As_FF_diag_i[row+1]; j++)
{
D_w[row] -= As_FF_diag_data[j];
}
for (j=As_FF_offd_i[row]; j < As_FF_offd_i[row+1]; j++)
{
D_w[row] -= As_FF_offd_data[j];
}
D_w[row] -= D_q[row];
row++;
}
}
for (i=startf; i<stopf; i++)
{
j = As_FF_diag_i[i];
if (D_w[i]) beta = 1.0/D_w[i];
else beta = 1.0;
As_FF_diag_data[j] = beta*D_q[i];
if (D_q[i]) gamma = -1.0/D_q[i];
else gamma = 1.0;
for (j=As_FF_diag_i[i]+1; j < As_FF_diag_i[i+1]; j++)
As_FF_diag_data[j] *= beta;
for (j=As_FF_offd_i[i]; j < As_FF_offd_i[i+1]; j++)
As_FF_offd_data[j] *= beta;
for (j=As_FC_diag_i[i]; j < As_FC_diag_i[i+1]; j++)
As_FC_diag_data[j] *= gamma;
for (j=As_FC_offd_i[i]; j < As_FC_offd_i[i+1]; j++)
As_FC_offd_data[j] *= gamma;
}
} /* end parallel region */
W = hypre_ParMatmul(As_FF, As_FC);
W_diag = hypre_ParCSRMatrixDiag(W);
W_offd = hypre_ParCSRMatrixOffd(W);
W_diag_i = hypre_CSRMatrixI(W_diag);
W_diag_j = hypre_CSRMatrixJ(W_diag);
W_diag_data = hypre_CSRMatrixData(W_diag);
W_offd_i = hypre_CSRMatrixI(W_offd);
W_offd_j = hypre_CSRMatrixJ(W_offd);
W_offd_data = hypre_CSRMatrixData(W_offd);
num_cols_P_offd = hypre_CSRMatrixNumCols(W_offd);
/*-----------------------------------------------------------------------
* Initialize data for P
*-----------------------------------------------------------------------*/
P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, memory_location_P);
P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, memory_location_P);
P_diag_size = n_Cpts + hypre_CSRMatrixI(W_diag)[n_Fpts];
P_offd_size = hypre_CSRMatrixI(W_offd)[n_Fpts];
if (P_diag_size)
{
P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size, memory_location_P);
P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, memory_location_P);
}
if (P_offd_size)
{
P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size, memory_location_P);
P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, memory_location_P);
}
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel private(i,j,start,stop,startf,stopf,c_pt,row,cnt_diag,cnt_offd)
#endif
{
HYPRE_Int my_thread_num = hypre_GetThreadNum();
startf = startf_array[my_thread_num];
stopf = startf_array[my_thread_num+1];
start = start_array[my_thread_num];
stop = start_array[my_thread_num+1];
if (my_thread_num > 0)
c_pt = cpt_array[my_thread_num-1];
else
c_pt = 0;
cnt_diag = W_diag_i[startf]+c_pt;
cnt_offd = W_offd_i[startf];
row = startf;
for (i=start; i < stop; i++)
{
if (CF_marker[i] > 0)
{
P_diag_j[cnt_diag] = c_pt++;
P_diag_data[cnt_diag++] = 1.0;
}
else
{
for (j=W_diag_i[row]; j < W_diag_i[row+1]; j++)
{
P_diag_j[cnt_diag] = W_diag_j[j];
P_diag_data[cnt_diag++] = W_diag_data[j];
}
for (j=W_offd_i[row]; j < W_offd_i[row+1]; j++)
{
P_offd_j[cnt_offd] = W_offd_j[j];
P_offd_data[cnt_offd++] = W_offd_data[j];
}
row++;
}
P_diag_i[i+1] = cnt_diag;
P_offd_i[i+1] = cnt_offd;
}
} /* end parallel region */
/*-----------------------------------------------------------------------
* Create matrix
*-----------------------------------------------------------------------*/
P = hypre_ParCSRMatrixCreate(comm,
hypre_ParCSRMatrixGlobalNumRows(A),
total_global_cpts,
hypre_ParCSRMatrixColStarts(A),
num_cpts_global,
num_cols_P_offd,
P_diag_i[n_fine],
P_offd_i[n_fine]);
P_diag = hypre_ParCSRMatrixDiag(P);
hypre_CSRMatrixData(P_diag) = P_diag_data;
hypre_CSRMatrixI(P_diag) = P_diag_i;
hypre_CSRMatrixJ(P_diag) = P_diag_j;
P_offd = hypre_ParCSRMatrixOffd(P);
hypre_CSRMatrixData(P_offd) = P_offd_data;
hypre_CSRMatrixI(P_offd) = P_offd_i;
hypre_CSRMatrixJ(P_offd) = P_offd_j;
hypre_ParCSRMatrixOwnsRowStarts(P) = 0;
hypre_ParCSRMatrixColMapOffd(P) = hypre_ParCSRMatrixColMapOffd(W);
hypre_ParCSRMatrixColMapOffd(W) = NULL;
hypre_CSRMatrixMemoryLocation(P_diag) = memory_location_P;
hypre_CSRMatrixMemoryLocation(P_offd) = memory_location_P;
/* Compress P, removing coefficients smaller than trunc_factor * Max */
if (trunc_factor != 0.0 || max_elmts > 0)
{
HYPRE_Int *map;
hypre_BoomerAMGInterpTruncation(P, trunc_factor, max_elmts);
P_diag_data = hypre_CSRMatrixData(P_diag);
P_diag_i = hypre_CSRMatrixI(P_diag);
P_diag_j = hypre_CSRMatrixJ(P_diag);
P_offd_data = hypre_CSRMatrixData(P_offd);
P_offd_i = hypre_CSRMatrixI(P_offd);
P_offd_j = hypre_CSRMatrixJ(P_offd);
P_diag_size = P_diag_i[n_fine];
P_offd_size = P_offd_i[n_fine];
col_map_offd_P = hypre_ParCSRMatrixColMapOffd(P);
if (num_cols_P_offd)
{
P_marker = hypre_CTAlloc(HYPRE_Int, num_cols_P_offd, HYPRE_MEMORY_HOST);
for (i=0; i < P_offd_size; i++)
{
P_marker[P_offd_j[i]] = 1;
}
new_ncols_P_offd = 0;
for (i=0; i < num_cols_P_offd; i++)
{
if (P_marker[i]) new_ncols_P_offd++;
}
new_col_map_offd = hypre_CTAlloc(HYPRE_BigInt, new_ncols_P_offd, HYPRE_MEMORY_HOST);
map = hypre_CTAlloc(HYPRE_Int, new_ncols_P_offd, HYPRE_MEMORY_HOST);
index = 0;
for (i=0; i < num_cols_P_offd; i++)
if (P_marker[i])
{
new_col_map_offd[index] = col_map_offd_P[i];
map[index++] = i;
}
hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i=0; i < P_offd_size; i++)
{
P_offd_j[i] = hypre_BinarySearch(map, P_offd_j[i],
new_ncols_P_offd);
}
hypre_TFree(col_map_offd_P, HYPRE_MEMORY_HOST);
hypre_ParCSRMatrixColMapOffd(P) = new_col_map_offd;
hypre_CSRMatrixNumCols(P_offd) = new_ncols_P_offd;
hypre_TFree(map, HYPRE_MEMORY_HOST);
}
}
hypre_MatvecCommPkgCreate(P);
*P_ptr = P;
/* Deallocate memory */
hypre_TFree(D_q, memory_location_P);
hypre_TFree(D_w, memory_location_P);
hypre_TFree(cpt_array, HYPRE_MEMORY_HOST);
hypre_TFree(start_array, HYPRE_MEMORY_HOST);
hypre_TFree(startf_array, HYPRE_MEMORY_HOST);
hypre_ParCSRMatrixDestroy(As_FF);
hypre_ParCSRMatrixDestroy(As_FC);
hypre_ParCSRMatrixDestroy(W);
return hypre_error_flag;
}
/*---------------------------------------------------------------------------
* hypre_BoomerAMGBuildModExtPIInterp
* Comment:
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGBuildModExtPIInterp(hypre_ParCSRMatrix *A, HYPRE_Int *CF_marker,
hypre_ParCSRMatrix *S, HYPRE_BigInt *num_cpts_global,
HYPRE_Int debug_flag,
HYPRE_Real trunc_factor, HYPRE_Int max_elmts,
HYPRE_Int *col_offd_S_to_A,
hypre_ParCSRMatrix **P_ptr)
{
/* Communication Variables */
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
HYPRE_MemoryLocation memory_location_P = hypre_ParCSRMatrixMemoryLocation(A);
hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
hypre_ParCSRCommHandle *comm_handle = NULL;
HYPRE_Int my_id, num_procs;
/* Variables to store input variables */
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag);
HYPRE_BigInt total_global_cpts;
hypre_CSRMatrix *As_FF_ext = NULL;
HYPRE_Real *As_FF_ext_data = NULL;
HYPRE_Int *As_FF_ext_i = NULL;
HYPRE_BigInt *As_FF_ext_j = NULL;
/* Interpolation matrix P */
hypre_ParCSRMatrix *P;
hypre_CSRMatrix *P_diag;
hypre_CSRMatrix *P_offd;
HYPRE_Real *P_diag_data = NULL;
HYPRE_Int *P_diag_i, *P_diag_j = NULL;
HYPRE_Real *P_offd_data = NULL;
HYPRE_Int *P_offd_i, *P_offd_j = NULL;
/* Intermediate matrices */
hypre_ParCSRMatrix *As_FF, *As_FC, *W;
HYPRE_Real *D_q, *D_w, *D_theta, *D_q_offd = NULL;
hypre_CSRMatrix *As_FF_diag;
hypre_CSRMatrix *As_FF_offd;
hypre_CSRMatrix *As_FC_diag;
hypre_CSRMatrix *As_FC_offd;
hypre_CSRMatrix *W_diag;
hypre_CSRMatrix *W_offd;
HYPRE_Int *As_FF_diag_i;
HYPRE_Int *As_FF_diag_j;
HYPRE_Int *As_FF_offd_i;
HYPRE_Int *As_FF_offd_j = NULL;
HYPRE_Int *As_FC_diag_i;
HYPRE_Int *As_FC_offd_i;
HYPRE_Int *W_diag_i;
HYPRE_Int *W_offd_i;
HYPRE_Int *W_diag_j;
HYPRE_Int *W_offd_j = NULL;
HYPRE_Real *As_FF_diag_data;
HYPRE_Real *As_FF_offd_data = NULL;
HYPRE_Real *As_FC_diag_data;
HYPRE_Real *As_FC_offd_data = NULL;
HYPRE_Real *W_diag_data;
HYPRE_Real *W_offd_data = NULL;
HYPRE_Real *buf_data = NULL;
HYPRE_Real *tmp_FF_diag_data = NULL;
HYPRE_BigInt *col_map_offd_P = NULL;
HYPRE_BigInt *new_col_map_offd = NULL;
HYPRE_BigInt first_index;
HYPRE_Int P_diag_size;
HYPRE_Int P_offd_size;
HYPRE_Int new_ncols_P_offd;
HYPRE_Int num_cols_P_offd;
HYPRE_Int *P_marker = NULL;
/* Loop variables */
HYPRE_Int index, startc, num_sends;
HYPRE_Int i, j, jj, k, kk;
HYPRE_Int *cpt_array;
HYPRE_Int *start_array;
HYPRE_Int *startf_array;
HYPRE_Int start, stop, startf, stopf;
HYPRE_Int cnt_diag, cnt_offd, row, c_pt;
HYPRE_Int num_cols_A_FF_offd;
HYPRE_Real value, value1, theta;
/* Definitions */
//HYPRE_Real wall_time;
HYPRE_Int n_Cpts, n_Fpts;
HYPRE_Int num_threads = hypre_NumThreads();
//if (debug_flag==4) wall_time = time_getWallclockSeconds();
/* BEGIN */
hypre_MPI_Comm_size(comm, &num_procs);
hypre_MPI_Comm_rank(comm,&my_id);
#ifdef HYPRE_NO_GLOBAL_PARTITION
if (my_id == (num_procs -1)) total_global_cpts = num_cpts_global[1];
hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm);
n_Cpts = num_cpts_global[1]-num_cpts_global[0];
#else
total_global_cpts = num_cpts_global[num_procs];
n_Cpts = num_cpts_global[my_id+1]-num_cpts_global[my_id];
#endif
hypre_ParCSRMatrixGenerateFFFC(A, CF_marker, num_cpts_global, S, &As_FC, &As_FF);
if (num_procs > 1)
{
As_FF_ext = hypre_ParCSRMatrixExtractBExt(As_FF,As_FF,1);
As_FF_ext_i = hypre_CSRMatrixI(As_FF_ext);
As_FF_ext_j = hypre_CSRMatrixBigJ(As_FF_ext);
As_FF_ext_data = hypre_CSRMatrixData(As_FF_ext);
}
As_FC_diag = hypre_ParCSRMatrixDiag(As_FC);
As_FC_diag_i = hypre_CSRMatrixI(As_FC_diag);
As_FC_diag_data = hypre_CSRMatrixData(As_FC_diag);
As_FC_offd = hypre_ParCSRMatrixOffd(As_FC);
As_FC_offd_i = hypre_CSRMatrixI(As_FC_offd);
As_FC_offd_data = hypre_CSRMatrixData(As_FC_offd);
As_FF_diag = hypre_ParCSRMatrixDiag(As_FF);
As_FF_diag_i = hypre_CSRMatrixI(As_FF_diag);
As_FF_diag_j = hypre_CSRMatrixJ(As_FF_diag);
As_FF_diag_data = hypre_CSRMatrixData(As_FF_diag);
As_FF_offd = hypre_ParCSRMatrixOffd(As_FF);
As_FF_offd_i = hypre_CSRMatrixI(As_FF_offd);
As_FF_offd_j = hypre_CSRMatrixJ(As_FF_offd);
As_FF_offd_data = hypre_CSRMatrixData(As_FF_offd);
n_Fpts = hypre_CSRMatrixNumRows(As_FF_diag);
num_cols_A_FF_offd = hypre_CSRMatrixNumCols(As_FF_offd);
#ifdef HYPRE_NO_GLOBAL_PARTITION
first_index = hypre_ParCSRMatrixRowStarts(As_FF)[0];
#else
first_index = hypre_ParCSRMatrixRowStarts(As_FF)[my_id];
#endif
tmp_FF_diag_data = hypre_CTAlloc(HYPRE_Real, As_FF_diag_i[n_Fpts], memory_location_P);
D_q = hypre_CTAlloc(HYPRE_Real, n_Fpts, memory_location_P);
D_theta = hypre_CTAlloc(HYPRE_Real, n_Fpts, memory_location_P);
D_w = hypre_CTAlloc(HYPRE_Real, n_Fpts, memory_location_P);
cpt_array = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
start_array = hypre_CTAlloc(HYPRE_Int, num_threads+1, HYPRE_MEMORY_HOST);
startf_array = hypre_CTAlloc(HYPRE_Int, num_threads+1, HYPRE_MEMORY_HOST);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel private(i,j,jj,k,kk,start,stop,startf,stopf,row,theta,value,value1)
#endif
{
HYPRE_Int my_thread_num = hypre_GetThreadNum();
start = (n_fine/num_threads)*my_thread_num;
if (my_thread_num == num_threads-1)
{
stop = n_fine;
}
else
{
stop = (n_fine/num_threads)*(my_thread_num+1);
}
start_array[my_thread_num+1] = stop;
for (i=start; i < stop; i++)
{
if (CF_marker[i] > 0)
{
cpt_array[my_thread_num]++;
}
}
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
if (my_thread_num == 0)
{
for (i=1; i < num_threads; i++)
{
cpt_array[i] += cpt_array[i-1];
}
}
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
if (my_thread_num > 0)
startf = start - cpt_array[my_thread_num-1];
else
startf = 0;
if (my_thread_num < num_threads-1)
stopf = stop - cpt_array[my_thread_num];
else
stopf = n_Fpts;
startf_array[my_thread_num+1] = stopf;
for (i=startf; i < stopf; i++)
{
for (j=As_FC_diag_i[i]; j < As_FC_diag_i[i+1]; j++)
{
D_q[i] += As_FC_diag_data[j];
}
for (j=As_FC_offd_i[i]; j < As_FC_offd_i[i+1]; j++)
{
D_q[i] += As_FC_offd_data[j];
}
}
for (j = As_FF_diag_i[startf]; j < As_FF_diag_i[stopf]; j++)
{
tmp_FF_diag_data[j] = As_FF_diag_data[j];
}
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
if (my_thread_num == 0)
{
if (num_cols_A_FF_offd)
{
D_q_offd = hypre_CTAlloc(HYPRE_Real, num_cols_A_FF_offd, memory_location_P);
}
index = 0;
comm_pkg = hypre_ParCSRMatrixCommPkg(As_FF);
if (!comm_pkg)
{
hypre_MatvecCommPkgCreate(As_FF);
comm_pkg = hypre_ParCSRMatrixCommPkg(As_FF);
}
num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
buf_data = hypre_CTAlloc(HYPRE_Real, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), memory_location_P);
for (i = 0; i < num_sends; i++)
{
startc = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j = startc; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
{
buf_data[index++] = D_q[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
}
}
comm_handle = hypre_ParCSRCommHandleCreate( 1, comm_pkg, buf_data, D_q_offd);
hypre_ParCSRCommHandleDestroy(comm_handle);
}
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
row = startf;
for (i=start; i < stop; i++)
{
if (CF_marker[i] < 0)
{
for (j=A_diag_i[i]; j < A_diag_i[i+1]; j++)
{
D_w[row] += A_diag_data[j];
}
for (j=A_offd_i[i]; j < A_offd_i[i+1]; j++)
{
D_w[row] += A_offd_data[j];
}
for (j=As_FF_diag_i[row]+1; j < As_FF_diag_i[row+1]; j++)
{
D_w[row] -= As_FF_diag_data[j];
}
for (j=As_FF_offd_i[row]; j < As_FF_offd_i[row+1]; j++)
{
D_w[row] -= As_FF_offd_data[j];
}
D_w[row] -= D_q[row];
row++;
}
}
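/* Extended+i weights: for each strong F-F neighbor jj of F-row i, look up
the reciprocal coupling a(jj,i) -- in the local diag part, or in the
extracted external rows for off-process columns -- fold it into the
scaling D_q[jj] + a(jj,i), and accumulate the term into D_theta[i]. */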
for (i=startf; i<stopf; i++)
{
for (j = As_FF_diag_i[i]+1; j < As_FF_diag_i[i+1]; j++)
{
jj = As_FF_diag_j[j];
value = D_q[jj];
for (k = As_FF_diag_i[jj]+1; k < As_FF_diag_i[jj+1]; k++)
{
kk = As_FF_diag_j[k];
if (kk == i)
{
value1 = tmp_FF_diag_data[k];
value += value1;
D_theta[i] += As_FF_diag_data[j]*value1/value;
break;
}
}
As_FF_diag_data[j] /= value;
}
for (j = As_FF_offd_i[i]; j < As_FF_offd_i[i+1]; j++)
{
jj = As_FF_offd_j[j];
value = D_q_offd[jj];
for (k = As_FF_ext_i[jj]; k < As_FF_ext_i[jj+1]; k++)
{
kk = (HYPRE_Int)(As_FF_ext_j[k] - first_index);
if (kk == i)
{
value1 = As_FF_ext_data[k];
value += value1;
D_theta[i] += As_FF_offd_data[j]*value1/value;
break;
}
}
As_FF_offd_data[j] /= value;
}
As_FF_diag_data[As_FF_diag_i[i]] = 1.0;
}
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
for (i=startf; i<stopf; i++)
{
theta = (D_theta[i]+D_w[i]);
if (theta)
{
theta = -1.0/theta;
for (j=As_FF_diag_i[i]; j < As_FF_diag_i[i+1]; j++)
As_FF_diag_data[j] *= theta;
for (j=As_FF_offd_i[i]; j < As_FF_offd_i[i+1]; j++)
As_FF_offd_data[j] *= theta;
}
}
} /* end parallel region */
W = hypre_ParMatmul(As_FF, As_FC);
W_diag = hypre_ParCSRMatrixDiag(W);
W_offd = hypre_ParCSRMatrixOffd(W);
W_diag_i = hypre_CSRMatrixI(W_diag);
W_diag_j = hypre_CSRMatrixJ(W_diag);
W_diag_data = hypre_CSRMatrixData(W_diag);
W_offd_i = hypre_CSRMatrixI(W_offd);
W_offd_j = hypre_CSRMatrixJ(W_offd);
W_offd_data = hypre_CSRMatrixData(W_offd);
num_cols_P_offd = hypre_CSRMatrixNumCols(W_offd);
/*-----------------------------------------------------------------------
* Initialize data for P
*-----------------------------------------------------------------------*/
P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, memory_location_P);
P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, memory_location_P);
P_diag_size = n_Cpts + hypre_CSRMatrixI(W_diag)[n_Fpts];
P_offd_size = hypre_CSRMatrixI(W_offd)[n_Fpts];
if (P_diag_size)
{
P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size, memory_location_P);
P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, memory_location_P);
}
if (P_offd_size)
{
P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size, memory_location_P);
P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, memory_location_P);
}
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel private(i,j,start,stop,startf,stopf,c_pt,row,cnt_diag,cnt_offd)
#endif
{
HYPRE_Int my_thread_num = hypre_GetThreadNum();
startf = startf_array[my_thread_num];
stopf = startf_array[my_thread_num+1];
start = start_array[my_thread_num];
stop = start_array[my_thread_num+1];
if (my_thread_num > 0)
c_pt = cpt_array[my_thread_num-1];
else
c_pt = 0;
cnt_diag = W_diag_i[startf]+c_pt;
cnt_offd = W_offd_i[startf];
row = startf;
for (i=start; i < stop; i++)
{
if (CF_marker[i] > 0)
{
P_diag_j[cnt_diag] = c_pt++;
P_diag_data[cnt_diag++] = 1.0;
}
else
{
for (j=W_diag_i[row]; j < W_diag_i[row+1]; j++)
{
P_diag_j[cnt_diag] = W_diag_j[j];
P_diag_data[cnt_diag++] = W_diag_data[j];
}
for (j=W_offd_i[row]; j < W_offd_i[row+1]; j++)
{
P_offd_j[cnt_offd] = W_offd_j[j];
P_offd_data[cnt_offd++] = W_offd_data[j];
}
row++;
}
P_diag_i[i+1] = cnt_diag;
P_offd_i[i+1] = cnt_offd;
}
} /* end parallel region */
/*-----------------------------------------------------------------------
* Create matrix
*-----------------------------------------------------------------------*/
P = hypre_ParCSRMatrixCreate(comm,
hypre_ParCSRMatrixGlobalNumRows(A),
total_global_cpts,
hypre_ParCSRMatrixColStarts(A),
num_cpts_global,
num_cols_P_offd,
P_diag_i[n_fine],
P_offd_i[n_fine]);
P_diag = hypre_ParCSRMatrixDiag(P);
hypre_CSRMatrixData(P_diag) = P_diag_data;
hypre_CSRMatrixI(P_diag) = P_diag_i;
hypre_CSRMatrixJ(P_diag) = P_diag_j;
P_offd = hypre_ParCSRMatrixOffd(P);
hypre_CSRMatrixData(P_offd) = P_offd_data;
hypre_CSRMatrixI(P_offd) = P_offd_i;
hypre_CSRMatrixJ(P_offd) = P_offd_j;
hypre_ParCSRMatrixOwnsRowStarts(P) = 0;
hypre_ParCSRMatrixColMapOffd(P) = hypre_ParCSRMatrixColMapOffd(W);
hypre_ParCSRMatrixColMapOffd(W) = NULL;
hypre_CSRMatrixMemoryLocation(P_diag) = memory_location_P;
hypre_CSRMatrixMemoryLocation(P_offd) = memory_location_P;
/* Compress P, removing coefficients smaller than trunc_factor * Max */
if (trunc_factor != 0.0 || max_elmts > 0)
{
HYPRE_Int *map;
hypre_BoomerAMGInterpTruncation(P, trunc_factor, max_elmts);
P_diag_data = hypre_CSRMatrixData(P_diag);
P_diag_i = hypre_CSRMatrixI(P_diag);
P_diag_j = hypre_CSRMatrixJ(P_diag);
P_offd_data = hypre_CSRMatrixData(P_offd);
P_offd_i = hypre_CSRMatrixI(P_offd);
P_offd_j = hypre_CSRMatrixJ(P_offd);
P_diag_size = P_diag_i[n_fine];
P_offd_size = P_offd_i[n_fine];
col_map_offd_P = hypre_ParCSRMatrixColMapOffd(P);
if (num_cols_P_offd)
{
P_marker = hypre_CTAlloc(HYPRE_Int, num_cols_P_offd, HYPRE_MEMORY_HOST);
for (i=0; i < P_offd_size; i++)
P_marker[P_offd_j[i]] = 1;
new_ncols_P_offd = 0;
for (i=0; i < num_cols_P_offd; i++)
if (P_marker[i]) new_ncols_P_offd++;
new_col_map_offd = hypre_CTAlloc(HYPRE_BigInt, new_ncols_P_offd, HYPRE_MEMORY_HOST);
map = hypre_CTAlloc(HYPRE_Int, new_ncols_P_offd, HYPRE_MEMORY_HOST);
index = 0;
for (i=0; i < num_cols_P_offd; i++)
if (P_marker[i])
{
new_col_map_offd[index] = col_map_offd_P[i];
map[index++] = i;
}
hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i=0; i < P_offd_size; i++)
{
P_offd_j[i] = hypre_BinarySearch(map, P_offd_j[i],
new_ncols_P_offd);
}
hypre_TFree(col_map_offd_P, HYPRE_MEMORY_HOST);
hypre_ParCSRMatrixColMapOffd(P) = new_col_map_offd;
hypre_CSRMatrixNumCols(P_offd) = new_ncols_P_offd;
hypre_TFree(map, HYPRE_MEMORY_HOST);
}
}
hypre_MatvecCommPkgCreate(P);
*P_ptr = P;
/* Deallocate memory */
hypre_TFree(D_q, memory_location_P);
hypre_TFree(D_q_offd, memory_location_P);
hypre_TFree(D_w, memory_location_P);
hypre_TFree(D_theta, memory_location_P);
hypre_TFree(cpt_array, HYPRE_MEMORY_HOST);
hypre_TFree(start_array, HYPRE_MEMORY_HOST);
hypre_TFree(startf_array, HYPRE_MEMORY_HOST);
hypre_TFree(buf_data, memory_location_P);
hypre_TFree(tmp_FF_diag_data, memory_location_P);
hypre_ParCSRMatrixDestroy(As_FF);
hypre_ParCSRMatrixDestroy(As_FC);
hypre_ParCSRMatrixDestroy(W);
hypre_CSRMatrixDestroy(As_FF_ext);
return hypre_error_flag;
}
|
MonteCarloMultiGPU.c | /*
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
*
* NVIDIA CORPORATION and its licensors retain all intellectual property
* and proprietary rights in and to this software, related documentation
* and any modifications thereto. Any use, reproduction, disclosure or
* distribution of this software and related documentation without an express
* license agreement from NVIDIA CORPORATION is strictly prohibited.
*/
#include <omp.h>
#include "cuda2acc.h"
#include "timer.h"
#define A1 0.31938153
#define A2 -0.356563782
#define A3 1.781477937
#define A4 -1.821255978
#define A5 1.330274429
#define RSQRT2PI 0.39894228040143267793994605993438
#define OPT_N 256
typedef struct{
float S;
float X;
float T;
float R;
float V;
} TOptiondata;
float rand_float(float low, float high)
{
float t = (float)rand() / (float)RAND_MAX;
return (1.0f - t) * low + t * high;
}
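// Moro's inversion of the cumulative normal CDF: a rational approximation
// near the center (|P - 0.5| < 0.42) and a polynomial in z = log(-log(.))
// in the tails.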
#pragma acc routine
double MoroInvCND(double P){
const double a1 = 2.50662823884;
const double a2 = -18.61500062529;
const double a3 = 41.39119773534;
const double a4 = -25.44106049637;
const double b1 = -8.4735109309;
const double b2 = 23.08336743743;
const double b3 = -21.06224101826;
const double b4 = 3.13082909833;
const double c1 = 0.337475482272615;
const double c2 = 0.976169019091719;
const double c3 = 0.160797971491821;
const double c4 = 2.76438810333863E-02;
const double c5 = 3.8405729373609E-03;
const double c6 = 3.951896511919E-04;
const double c7 = 3.21767881768E-05;
const double c8 = 2.888167364E-07;
const double c9 = 3.960315187E-07;
double y, z;
y = P - 0.5;
if(fabs(y) < 0.42){
z = y * y;
z = y * (((a4 * z + a3) * z + a2) * z + a1) / ((((b4 * z + b3) * z + b2) * z + b1) * z + 1);
}else{
if(y > 0)
z = log(-log(1.0 - P));
else
z = log(-log(P));
z = c1 + z * (c2 + z * (c3 + z * (c4 + z * (c5 + z * (c6 + z * (c7 + z * (c8 + z * c9)))))));
if(y < 0) z = -z;
}
return z;
}
#pragma acc routine
double NormalDistribution(unsigned int i, unsigned int pathN){
double p = (double)(i + 1) / (double)(pathN + 1);
return MoroInvCND(p);
}
#pragma acc routine
static double endCallValue(double S, double X, double r, double MuByT, double VBySqrtT){
double callValue = S * exp(MuByT + VBySqrtT * r) - X;
return (callValue > 0) ? callValue : 0;
}
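// Monte Carlo estimate of a European call under Black-Scholes: average the
// discounted payoff over path_n inverse-CDF samples taken at equally spaced
// probabilities, and report a 95% confidence half-width (the 1.96 factor).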
void MonteCarlo(float *call_value_e, float *confidence, TOptiondata option_data,unsigned int path_n)
{
const double S = option_data.S;
const double X = option_data.X;
const double T = option_data.T;
const double R = option_data.R;
const double V = option_data.V;
const double mu_x_t = (R - 0.5 * V * V) * T;
const double v_x_sqrt_t = V * sqrt(T);
double sum = 0, sum2 = 0;
for(int pos = 0; pos < path_n; pos++) {
double sample = NormalDistribution(pos, path_n);
double call_value = endCallValue(S, X, sample, mu_x_t, v_x_sqrt_t);
sum += call_value;
sum2 += call_value * call_value;
}
*call_value_e = (float)(exp(-R * T) * sum / (double)path_n);
double stdDev = sqrt(((double)path_n * sum2 - sum * sum)/ ((double)path_n * (double)(path_n - 1)));
*confidence = (float)(exp(-R * T) * 1.96 * stdDev / sqrt((double)path_n));
}
void MonteCarloCPU(float *call_value_e_cpu, float *confidence_cpu, TOptiondata *option_data_arr, int path_n)
{
StartTimer();
for(int i = 0; i < OPT_N; i++){
MonteCarlo(call_value_e_cpu + i, confidence_cpu + i, option_data_arr[i], path_n);
}
printf("MonteCarloCPU() used time: %f (ms)\n", GetTimer());
}
void MonteCarloMultiGPU(float *call_value_e_gpu, float *confidence_gpu, TOptiondata *option_data_arr, int path_n)
{
int gpu_n = acc_get_num_devices(acc_device_nvidia);
//#pragma acc data copyin(option_data_arr[0:OPT_N]) copyout(call_value_e_gpu[0:OPT_N],confidence_gpu[0:OPT_N]) is preferred. "enter/exit data" is used here for timing purpose
#pragma acc enter data create(call_value_e_gpu[0:OPT_N],confidence_gpu[0:OPT_N]) copyin(option_data_arr[0:OPT_N])
{
StartTimer();
int len = OPT_N / gpu_n;
int rem = OPT_N % gpu_n;
int s, e;
for (int j = 0; j < gpu_n; j++) {
// set start point and end point for every part
if (j < rem) {
s = j * (len + 1);
e = s + len;
} else {
s = j * len + rem;
e = s + len - 1;
}
#pragma acc kernels loop async(j) present(call_value_e_gpu[0:OPT_N],confidence_gpu[0:OPT_N],option_data_arr[0:OPT_N])
for (int i = s; i <= e; i++) {
float *call_value_e = call_value_e_gpu + i, *confidence = confidence_gpu + i;
TOptiondata option_data = option_data_arr[i];
float S = option_data.S;
float X = option_data.X;
float T = option_data.T;
float R = option_data.R;
float V = option_data.V;
float mu_x_t = (R - 0.5f * V * V) * T;
float v_x_sqrt_t = V * sqrtf(T);
float sum = 0, sum2 = 0;
#pragma acc loop reduction(+:sum,sum2)
for(unsigned int pos = 0; pos < path_n; ++pos) {
float sample = NormalDistribution(pos, path_n);
float call_value = endCallValue(S, X, sample, mu_x_t, v_x_sqrt_t);
sum += call_value;
sum2 += call_value * call_value;
}
*call_value_e = (float)(expf(-R * T) * sum / (float)path_n);
float stdDev = sqrtf(((float)path_n * sum2 - sum * sum)/ ((float)path_n * (float)(path_n - 1)));
*confidence = (float)(expf(-R * T) * 1.96f * stdDev / sqrtf((float)path_n));
}
}
}
acc_wait_all();
printf("MonteCarloMultiGPU() used time: %f (ms)\n", GetTimer());
#pragma acc exit data copyout(call_value_e_gpu[0:OPT_N],confidence_gpu[0:OPT_N]) delete(option_data_arr[0:OPT_N])
}
void runtest(float thresh)
{
float call_value_e_gpu[OPT_N], confidence_gpu[OPT_N];
float call_value_e_cpu[OPT_N], confidence_cpu[OPT_N];
TOptiondata option_data[OPT_N];
int path_n = 1 << 18, i;
int GPU_N = acc_get_num_devices(acc_device_nvidia);
printf("Number of GPUs = %d\n", GPU_N);
printf("main(): generating input data...\n");
// init with random data
srand((unsigned)time(NULL));
for(i = 0; i < OPT_N; ++i){
option_data[i].S = rand_float(5.0f, 50.0f);
option_data[i].X = rand_float(10.0f, 25.0f);
option_data[i].T = rand_float(1.0f, 5.0f);
option_data[i].R = 0.06f;
option_data[i].V = 0.10f;
call_value_e_gpu[i] = -1.0f;
call_value_e_cpu[i] = -1.0f;
confidence_gpu[i] = -1.0f;
confidence_cpu[i] = -1.0f;
}
printf("running CPU MonteCarlo...\n");
MonteCarloCPU(call_value_e_cpu, confidence_cpu, option_data, path_n);
#pragma omp parallel num_threads(GPU_N)
{
acc_set_device_num(omp_get_thread_num()+1, acc_device_nvidia);
printf("GPU Device #%d\n", acc_get_current_cuda_device());
MonteCarloMultiGPU(call_value_e_gpu, confidence_gpu, option_data, path_n);
printf("%s\n", (fcheck(call_value_e_cpu, call_value_e_gpu, OPT_N, thresh) ? "Test FAILS" : "Test PASSES"));
}
printf("Number of options: %d\nNumber of paths: %d\n", OPT_N, path_n);
}
int main(int argc, char **argv)
{
float th = 0.1;
char *names[] = { "thresh" };
int flags[] = { 1 };
int map[] = { 0 };
struct OptionTable *opttable = make_opttable(1, names, flags, map);
printf("%s Starting...\n\n", argv[0]);
argproc(argc, argv, opttable);
const char *str_th = opttable->table[0].val;
if (str_th)
th = atof(str_th);
print_gpuinfo(argc, (const char **)argv);
printf("MonteCarloMultiGPU\n");
printf("==================\n");
runtest(th);
free_opttable(opttable);
return 0;
}
|
p_index.c |
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <omp.h>
#define THRESHOLD 8
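/* Builds a permutation index over an array of keys: p_init fills p with
0..sz-1 and sorts those indices so that buf[p[0]] <= buf[p[1]] <= ...,
using a task-parallel merge sort with a quadratic base case. */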
static void
p_merge(unsigned int* base, unsigned int* l1, unsigned int* h1,
unsigned int* l2, unsigned int* h2, unsigned int* buf)
{
unsigned int* i = l1;
unsigned int* j = l2;
unsigned int* k = buf;
for(;(i != h1) && (j != h2);)
{
if(*(base+(*(j))) < *(base+(*(i)))) memcpy(k++, j++, sizeof(unsigned int));
else memcpy(k++, i++, sizeof(unsigned int));
}
for(;i != h1;) memcpy(k++, i++, sizeof(unsigned int));
for(;j != h2;) memcpy(k++, j++, sizeof(unsigned int));
memcpy(l1, buf, ((h1-l1) + (h2-l2)) * sizeof(unsigned int));
}
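/* Selection-style quadratic sort of the index range [l,h) by key
base[*i]; used only for runs of THRESHOLD elements or fewer. */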
static void
p_sort_(unsigned int* base, unsigned int* l, unsigned int* h)
{
unsigned int* i;
for(i = l; i < h; i++)
{
unsigned int* elm = base + (*(i));
unsigned int* j;
for(j = i+1; j < h; j++)
{
if(*elm > *(base + (*(j))))
{
unsigned int t = *(i);
*(i) = *(j);
*(j) = t;
elm = base + (*(i));
}
}
}
}
static void
p_sort(unsigned int* base, unsigned int* l, unsigned int* h, unsigned int* p_)
{
if((h - l) <= THRESHOLD) p_sort_(base, l, h);
else
{
unsigned int* m = l + (h - l) / 2;
unsigned int* m_ = p_ + (m - l);
#pragma omp task
p_sort(base, l, m, p_);
p_sort(base, m, h, m_);
#pragma omp taskwait
p_merge(base, l, m, m, h, p_);
}
}
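/* Entry point: initializes p to the identity permutation, then sorts it
by key in a single task region; merge scratch space is 64-byte aligned. */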
void
p_init(size_t sz, unsigned int* buf, unsigned int* p)
{
unsigned int* p_;
p_ = (unsigned int*) _mm_malloc(sz * sizeof(unsigned int), 64);
unsigned int i;
for(i = 0; i < sz; i++) p[i] = i;
unsigned int* l = p;
unsigned int* h = p + sz;
#pragma omp parallel
{
#pragma omp single
{
p_sort(buf, l, h, p_);
}
}
_mm_free(p_);
}
|
matrix.h | // @file matrix.h This code provide a templated matrix implementation
// @author TPOC: contact@palisade-crypto.org
//
// @copyright Copyright (c) 2019, New Jersey Institute of Technology (NJIT)
// All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
// 1. Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution. THIS SOFTWARE IS
// PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
// EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
// INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef LBCRYPTO_MATH_MATRIX_H
#define LBCRYPTO_MATH_MATRIX_H
#include <cmath>
#include <functional>
#include <iostream>
#include <memory>
#include <string>
#include <vector>
#include "encoding/encodings.h"
#include "lattice/backend.h"
#include "math/backend.h"
#include "math/distrgen.h"
#include "math/nbtheory.h"
#include "utils/inttypes.h"
#include "utils/memory.h"
#include "utils/utilities.h"
using std::invalid_argument;
namespace lbcrypto {
template <class Element>
class Matrix : public Serializable {
public:
typedef vector<vector<Element>> data_t;
typedef vector<Element> data_row_t;
typedef std::function<Element(void)> alloc_func;
/**
* Constructor that initializes matrix values using a zero allocator
*
* @param &allocZero lambda function for zero initialization.
* @param &rows number of rows.
* @param &rows number of columns.
*/
Matrix(alloc_func allocZero, size_t rows, size_t cols)
: data(), rows(rows), cols(cols), allocZero(allocZero) {
data.resize(rows);
for (auto row = data.begin(); row != data.end(); ++row) {
for (size_t col = 0; col < cols; ++col) {
row->push_back(allocZero());
}
}
}
// TODO: add Clear();
/**
* Constructor that initializes matrix values using a distribution generation
* allocator
*
* @param &allocZero lambda function for zero initialization (used for
* initializing derived matrix objects)
* @param &rows number of rows.
* @param &rows number of columns.
* @param &allocGen lambda function for initialization using a distribution
* generator.
*/
Matrix(alloc_func allocZero, size_t rows, size_t cols, alloc_func allocGen);
/**
* Constructor of an empty matrix.
* SetSize must be called on this matrix to use it
* SetAlloc needs to be called if 0 passed to constructor
* This mostly exists to support deserializing
*
* @param &allocZero lambda function for zero initialization.
*/
explicit Matrix(alloc_func allocZero = 0)
: data(), rows(0), cols(0), allocZero(allocZero) {}
/**
* Set the size of a matrix, elements are zeroed out
*
* @param rows number of rows
* @param cols number of columns
*/
void SetSize(size_t rows, size_t cols) {
if (this->rows != 0 || this->cols != 0) {
PALISADE_THROW(not_available_error,
"You cannot SetSize on a non-empty matrix");
}
this->rows = rows;
this->cols = cols;
data.resize(rows);
for (auto row = data.begin(); row != data.end(); ++row) {
for (size_t col = 0; col < cols; ++col) {
row->push_back(allocZero());
}
}
}
/**
* SetAllocator - set the function to allocate a zero;
* basically only required for deserializer
*
* @param allocZero
*/
void SetAllocator(alloc_func allocZero) { this->allocZero = allocZero; }
/**
* Copy constructor
*
* @param &other the matrix object to be copied
*/
Matrix(const Matrix<Element>& other)
: data(), rows(other.rows), cols(other.cols), allocZero(other.allocZero) {
deepCopyData(other.data);
}
/**
* Assignment operator
*
* @param &other the matrix object whose values are to be copied
* @return the resulting matrix
*/
Matrix<Element>& operator=(const Matrix<Element>& other);
/**
* In-place change of the current matrix to a matrix of all ones
*
* @return the resulting matrix
*/
Matrix<Element>& Ones();
// Macro for convenient definitions of class implementations of special
// functions
#define ONES_FOR_TYPE(T) \
template <> \
Matrix<T>& Matrix<T>::Ones() { \
for (size_t row = 0; row < rows; ++row) { \
for (size_t col = 0; col < cols; ++col) { \
data[row][col] = 1; \
} \
} \
return *this; \
}
/**
* In-place modulo reduction
*
* @return the resulting matrix
*/
Matrix<Element>& ModEq(const Element& modulus);
/**
* modular subtraction
*
* @return the resulting matrix
*/
Matrix<Element>& ModSubEq(Matrix<Element> const& b, const Element& modulus);
/**
* Fill matrix using the same element
*
* @param &val the element the matrix is filled by
*
* @return the resulting matrix
*/
Matrix<Element>& Fill(const Element& val);
/**
* In-place change of the current matrix to Identity matrix
*
* @return the resulting matrix
*/
Matrix<Element>& Identity();
#define IDENTITY_FOR_TYPE(T) \
template <> \
Matrix<T>& Matrix<T>::Identity() { \
for (size_t row = 0; row < rows; ++row) { \
for (size_t col = 0; col < cols; ++col) { \
if (row == col) { \
data[row][col] = 1; \
} else { \
data[row][col] = 0; \
} \
} \
} \
return *this; \
}
/**
* Sets the first row to be powers of two for when the base is two
*
* @param base is the base the digits of the matrix are represented in
* @return the resulting matrix
*/
Matrix<Element> GadgetVector(int64_t base = 2) const;
#define GADGET_FOR_TYPE(T) \
template <> \
Matrix<T> Matrix<T>::GadgetVector(int64_t base) const { \
Matrix<T> g(allocZero, rows, cols); \
auto base_matrix = allocZero(); \
size_t k = cols / rows; \
base_matrix = base; \
g(0, 0) = 1; \
for (size_t i = 1; i < k; i++) { \
g(0, i) = g(0, i - 1) * base_matrix; \
} \
for (size_t row = 1; row < rows; row++) { \
for (size_t i = 0; i < k; i++) { \
g(row, i + row * k) = g(0, i); \
} \
} \
return g; \
}
#define GADGET_FOR_TYPE_DCRT(T) \
template <> \
Matrix<T> Matrix<T>::GadgetVector(int64_t base) const { \
Matrix<T> g(allocZero, rows, cols); \
auto base_matrix = allocZero(); \
base_matrix = base; \
size_t bk = 1; \
\
auto params = g(0, 0).GetParams()->GetParams(); \
\
uint64_t digitCount = (long)ceil( \
log2(params[0]->GetModulus().ConvertToDouble()) / log2(base)); \
\
for (size_t k = 0; k < digitCount; k++) { \
for (size_t i = 0; i < params.size(); i++) { \
NativePoly temp(params[i]); \
temp = bk; \
g(0, k + i * digitCount).SetElementAtIndex(i, std::move(temp)); \
} \
bk *= base; \
} \
\
size_t kCols = cols / rows; \
for (size_t row = 1; row < rows; row++) { \
for (size_t i = 0; i < kCols; i++) { \
g(row, i + row * kCols) = g(0, i); \
} \
} \
return g; \
}
/**
* Computes the infinity norm
*
* @return the norm in double format
*/
double Norm() const;
#define NORM_FOR_TYPE(T) \
template <> \
double Matrix<T>::Norm() const { \
double retVal = 0.0; \
double locVal = 0.0; \
for (size_t row = 0; row < rows; ++row) { \
for (size_t col = 0; col < cols; ++col) { \
locVal = data[row][col].Norm(); \
if (locVal > retVal) { \
retVal = locVal; \
} \
} \
} \
return retVal; \
}
/**
* Matrix multiplication
*
* @param &other the multiplier matrix
* @return the result of multiplication
*/
Matrix<Element> Mult(Matrix<Element> const& other) const;
/**
* Operator for matrix multiplication
*
* @param &other the multiplier matrix
* @return the result of multiplication
*/
Matrix<Element> operator*(Matrix<Element> const& other) const {
return Mult(other);
}
/**
* Multiplication of matrix by a scalar
*
* @param &other the multiplier element
* @return the result of multiplication
*/
Matrix<Element> ScalarMult(Element const& other) const {
Matrix<Element> result(*this);
#pragma omp parallel for
for (size_t col = 0; col < result.cols; ++col) {
for (size_t row = 0; row < result.rows; ++row) {
result.data[row][col] = result.data[row][col] * other;
}
}
return result;
}
/**
* Operator for scalar multiplication
*
* @param &other the multiplier element
* @return the result of multiplication
*/
Matrix<Element> operator*(Element const& other) const {
return ScalarMult(other);
}
/**
* Equality check
*
* @param &other the matrix object to compare to
* @return the boolean result
*/
bool Equal(Matrix<Element> const& other) const {
if (rows != other.rows || cols != other.cols) {
return false;
}
for (size_t i = 0; i < rows; ++i) {
for (size_t j = 0; j < cols; ++j) {
if (data[i][j] != other.data[i][j]) {
return false;
}
}
}
return true;
}
/**
* Operator for equality check
*
* @param &other the matrix object to compare to
* @return the boolean result
*/
bool operator==(Matrix<Element> const& other) const { return Equal(other); }
/**
* Operator for non-equality check
*
* @param &other the matrix object to compare to
* @return the boolean result
*/
bool operator!=(Matrix<Element> const& other) const { return !Equal(other); }
/**
* Get property to access the data as a vector of vectors
*
* @return the data as vector of vectors
*/
const data_t& GetData() const { return data; }
/**
* Get property to access the number of rows in the matrix
*
* @return the number of rows
*/
size_t GetRows() const { return rows; }
/**
* Get property to access the number of columns in the matrix
*
* @return the number of columns
*/
size_t GetCols() const { return cols; }
/**
* Get property to access the zero allocator for the matrix
*
* @return the lambda function corresponding to the element zero allocator
*/
alloc_func GetAllocator() const { return allocZero; }
/**
* Sets the evaluation or coefficient representation for all ring elements
* that support the SetFormat method
*
* @param &format the enum value corresponding to coefficient or evaluation
* representation
*/
void SetFormat(Format format);
/**
* Matrix addition
*
* @param &other the matrix to be added
* @return the resulting matrix
*/
Matrix<Element> Add(Matrix<Element> const& other) const {
if (rows != other.rows || cols != other.cols) {
PALISADE_THROW(math_error,
"Addition operands have incompatible dimensions");
}
Matrix<Element> result(*this);
#pragma omp parallel for
for (size_t j = 0; j < cols; ++j) {
for (size_t i = 0; i < rows; ++i) {
result.data[i][j] += other.data[i][j];
}
}
return result;
}
/**
* Operator for matrix addition
*
* @param &other the matrix to be added
* @return the resulting matrix
*/
Matrix<Element> operator+(Matrix<Element> const& other) const {
return this->Add(other);
}
/**
* Operator for in-place addition
*
* @param &other the matrix to be added
* @return the resulting matrix (same object)
*/
Matrix<Element>& operator+=(Matrix<Element> const& other);
/**
* Matrix subtraction
*
* @param &other the matrix to be subtracted
* @return the resulting matrix
*/
Matrix<Element> Sub(Matrix<Element> const& other) const {
if (rows != other.rows || cols != other.cols) {
PALISADE_THROW(math_error,
"Subtraction operands have incompatible dimensions");
}
Matrix<Element> result(allocZero, rows, other.cols);
#pragma omp parallel for
for (size_t j = 0; j < cols; ++j) {
for (size_t i = 0; i < rows; ++i) {
result.data[i][j] = data[i][j] - other.data[i][j];
}
}
return result;
}
/**
* Operator for matrix subtraction
*
* @param &other the matrix to be subtracted
* @return the resulting matrix
*/
Matrix<Element> operator-(Matrix<Element> const& other) const {
return this->Sub(other);
}
/**
* Operator for in-place matrix subtraction
*
* @param &other the matrix to be subtracted
* @return the resulting matrix (same object)
*/
Matrix<Element>& operator-=(Matrix<Element> const& other);
/**
* Matrix transposition
*
* @return the resulting matrix
*/
Matrix<Element> Transpose() const;
// YSP The signature of this method needs to be changed in the future
/**
* Matrix determinant - found using Laplace formula with complexity O(d!),
* where d is the dimension
*
* @param *result where the result is stored
*/
void Determinant(Element* result) const;
// Element Determinant() const;
/**
* Cofactor matrix - the matrix of determinants of the minors A_{ij}
* multiplied by (-1)^{i+j}
*
* @return the cofactor matrix for the given matrix
*/
Matrix<Element> CofactorMatrix() const;
/**
* Add rows to the bottom of the matrix
*
* @param &other the matrix to be added to the bottom of current matrix
* @return the resulting matrix
*/
Matrix<Element>& VStack(Matrix<Element> const& other);
/**
* Add columns to the right of the matrix
*
* @param &other the matrix to be added to the right of current matrix
* @return the resulting matrix
*/
Matrix<Element>& HStack(Matrix<Element> const& other);
/**
* Matrix indexing operator - writeable instance of the element
*
* @param &row row index
* @param &col column index
* @return the element at the index
*/
Element& operator()(size_t row, size_t col) { return data[row][col]; }
/**
* Matrix indexing operator - read-only instance of the element
*
* @param &row row index
* @param &col column index
* @return the element at the index
*/
Element const& operator()(size_t row, size_t col) const {
return data[row][col];
}
/**
* Matrix row extractor
*
* @param &row row index
* @return the row at the index
*/
Matrix<Element> ExtractRow(size_t row) const {
Matrix<Element> result(this->allocZero, 1, this->cols);
int i = 0;
for (auto& elem : this->GetData()[row]) {
result(0, i) = elem;
i++;
}
return result;
// return *this;
}
/**
* Matrix column extractor
*
* @param &col col index
* @return the col at the index
*/
Matrix<Element> ExtractCol(size_t col) const {
Matrix<Element> result(this->allocZero, this->rows, 1);
for (size_t i = 0; i < this->rows; i++) {
result(i, 0) = data[i][col];
}
return result;
// return *this;
}
/**
* Matrix rows extractor in a range from row_start to row_end, inclusive
*
* @param &row_start &row_end row indices
* @return the rows in the range delimited by indices inclusive
*/
inline Matrix<Element> ExtractRows(size_t row_start, size_t row_end) const {
Matrix<Element> result(this->allocZero, row_end - row_start + 1,
this->cols);
for (usint row = row_start; row < row_end + 1; row++) {
int i = 0;
for (auto elem = this->GetData()[row].begin();
elem != this->GetData()[row].end(); ++elem) {
result(row - row_start, i) = *elem;
i++;
}
}
return result;
}
friend std::ostream& operator<<(std::ostream& os, const Matrix<Element>& m) {
os << "[ ";
for (size_t row = 0; row < m.GetRows(); ++row) {
os << "[ ";
for (size_t col = 0; col < m.GetCols(); ++col) {
os << m(row, col) << " ";
}
os << "]\n";
}
os << " ]\n";
return os;
}
/**
* Call switch format for each (ring) element
*
*/
void SwitchFormat();
#define NOT_AN_ELEMENT_MATRIX(T) \
template <> \
void Matrix<T>::SwitchFormat() { \
PALISADE_THROW(not_available_error, "Not a matrix of Elements"); \
}
/*
* Multiply the matrix by a vector whose elements are all 1's. This causes
* the elements of each row of the matrix to be added and placed into the
* corresponding position in the output vector.
*/
Matrix<Element> MultByUnityVector() const;
/*
* Multiply the matrix by a vector of random 1's and 0's, which is the same as
* adding select elements in each row together. Return a vector that is a rows
* x 1 matrix.
*/
Matrix<Element> MultByRandomVector(std::vector<int> ranvec) const;
template <class Archive>
void save(Archive& ar, std::uint32_t const version) const {
ar(::cereal::make_nvp("d", data));
ar(::cereal::make_nvp("r", rows));
ar(::cereal::make_nvp("c", cols));
}
template <class Archive>
void load(Archive& ar, std::uint32_t const version) {
if (version > SerializedVersion()) {
PALISADE_THROW(deserialize_error,
"serialized object version " + std::to_string(version) +
" is from a later version of the library");
}
ar(::cereal::make_nvp("d", data));
ar(::cereal::make_nvp("r", rows));
ar(::cereal::make_nvp("c", cols));
// users will need to SetAllocator for any newly deserialized matrix
}
std::string SerializedObjectName() const { return "Matrix"; }
static uint32_t SerializedVersion() { return 1; }
private:
data_t data;
size_t rows;
size_t cols;
alloc_func allocZero;
// mutable int NUM_THREADS = 1;
// deep copy of data - used for copy constructor
void deepCopyData(data_t const& src) {
data.clear();
data.resize(src.size());
for (size_t row = 0; row < src.size(); ++row) {
for (auto elem = src[row].begin(); elem != src[row].end(); ++elem) {
data[row].push_back(*elem);
}
}
}
};
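/* Illustrative usage sketch (not part of the library; assumes Matrix is
instantiable for a plain integral element):

auto zero = []() { return int32_t(0); };
Matrix<int32_t> m(zero, 2, 3); // 2x3 matrix of zeros
m(0, 1) = 7; // writeable indexing
std::cout << m; // stream output defined in the class above
*/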
/**
* Operator for scalar multiplication of matrix
*
* @param &e element
* @param &M matrix
* @return the resulting matrix
*/
template <class Element>
Matrix<Element> operator*(Element const& e, Matrix<Element> const& M) {
return M.ScalarMult(e);
}
/**
* Generates a matrix of rotations. See pages 7-8 of
* https://eprint.iacr.org/2013/297
*
* @param &inMat the matrix of power-of-2 cyclotomic ring elements to be rotated
* @return the resulting matrix of big binary integers
*/
template <typename Element>
Matrix<typename Element::Integer> Rotate(Matrix<Element> const& inMat);
/**
* Each element becomes a square matrix with columns of that element's
* rotations in coefficient form. See pages 7-8 of
* https://eprint.iacr.org/2013/297
*
* @param &inMat the matrix of power-of-2 cyclotomic ring elements to be rotated
* @return the resulting matrix of big binary integers
*/
template <typename Element>
Matrix<typename Element::Vector> RotateVecResult(Matrix<Element> const& inMat);
/**
* Stream output operator
*
* @param &os stream
* @param &m matrix to be outputted
* @return the chained stream
*/
template <class Element>
std::ostream& operator<<(std::ostream& os, const Matrix<Element>& m);
/**
* Gives the Cholesky decomposition of the input matrix.
* The assumption is that the covariance matrix does not have large
* coefficients because it is formed by discrete gaussians e and s; this
* implies int32_t can be used. This algorithm can be further improved - see
* section 4.4 of the Darmstadt paper http://eprint.iacr.org/2013/297.pdf
*
* @param &input the matrix for which the Cholesky decomposition is to be
* computed
* @return the resulting matrix of floating-point numbers
*/
Matrix<double> Cholesky(const Matrix<int32_t>& input);
void Cholesky(const Matrix<int32_t>& input, Matrix<double>& result);
/**
* Convert a matrix of integers from BigInteger to int32_t
* Convert from Z_q to [-q/2, q/2]
*
* @param &input the input matrix
* @param &modulus the ring modulus
* @return the resulting matrix of int32_t
*/
Matrix<int32_t> ConvertToInt32(const Matrix<BigInteger>& input,
const BigInteger& modulus);
/**
* Convert a matrix of BigVector to int32_t
* Convert from Z_q to [-q/2, q/2]
*
* @param &input the input matrix
* @param &modulus the ring modulus
* @return the resulting matrix of int32_t
*/
Matrix<int32_t> ConvertToInt32(const Matrix<BigVector>& input,
const BigInteger& modulus);
/**
 * Split a vector of int64_t into a vector of ring elements with ring dimension
 * n
*
* @param &other the input matrix
* @param &n the ring dimension
 * @param &params Poly element params
* @return the resulting matrix of Poly
*/
template <typename Element>
Matrix<Element> SplitInt64IntoElements(
Matrix<int64_t> const& other, size_t n,
const shared_ptr<typename Element::Params> params);
#define SPLIT64_FOR_TYPE(T) \
template <> \
Matrix<T> SplitInt64IntoElements( \
Matrix<int64_t> const& other, size_t n, \
const shared_ptr<typename T::Params> params) { \
auto zero_alloc = T::Allocator(params, Format::COEFFICIENT); \
size_t rows = other.GetRows() / n; \
Matrix<T> result(zero_alloc, rows, 1); \
for (size_t row = 0; row < rows; ++row) { \
std::vector<int64_t> values(n); \
for (size_t i = 0; i < n; ++i) values[i] = other(row * n + i, 0); \
result(row, 0) = values; \
} \
return result; \
}
/**
* Another method for splitting a vector of int32_t into a vector of ring
* elements with ring dimension n
*
* @param &other the input matrix
* @param &n the ring dimension
 * @param &params Poly element params
* @return the resulting matrix of Poly
*/
template <typename Element>
Matrix<Element> SplitInt32AltIntoElements(
Matrix<int32_t> const& other, size_t n,
const shared_ptr<typename Element::Params> params);
#define SPLIT32ALT_FOR_TYPE(T) \
template <> \
Matrix<T> SplitInt32AltIntoElements( \
Matrix<int32_t> const& other, size_t n, \
const shared_ptr<typename T::Params> params) { \
auto zero_alloc = T::Allocator(params, Format::COEFFICIENT); \
size_t rows = other.GetRows(); \
Matrix<T> result(zero_alloc, rows, 1); \
for (size_t row = 0; row < rows; ++row) { \
std::vector<int32_t> values(n); \
for (size_t i = 0; i < n; ++i) values[i] = other(row, i); \
result(row, 0) = values; \
} \
return result; \
}
/**
 * Another method for splitting a vector of int64_t into a vector of ring
 * elements with ring dimension n
*
* @param &other the input matrix
* @param &n the ring dimension
 * @param &params Poly element params
* @return the resulting matrix of Poly
*/
template <typename Element>
Matrix<Element> SplitInt64AltIntoElements(
Matrix<int64_t> const& other, size_t n,
const shared_ptr<typename Element::Params> params);
#define SPLIT64ALT_FOR_TYPE(T) \
template <> \
Matrix<T> SplitInt64AltIntoElements( \
Matrix<int64_t> const& other, size_t n, \
const shared_ptr<typename T::Params> params) { \
auto zero_alloc = T::Allocator(params, Format::COEFFICIENT); \
size_t rows = other.GetRows(); \
Matrix<T> result(zero_alloc, rows, 1); \
for (size_t row = 0; row < rows; ++row) { \
std::vector<int64_t> values(n); \
for (size_t i = 0; i < n; ++i) values[i] = other(row, i); \
result(row, 0) = values; \
} \
return result; \
}
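/*
 * Layout note with a minimal stand-in (hypothetical helper over std::vector):
 * SplitInt64IntoElements reads a (rows*n) x 1 column and packs every run of n
 * consecutive entries (index row*n + i) into one ring element, while the
 * *Alt* variants read a rows x n matrix and pack each row (index (row, i)).
 */
inline std::vector<std::vector<int64_t>> SplitColumnSketch(
    const std::vector<int64_t>& column, size_t n) {
  std::vector<std::vector<int64_t>> out(column.size() / n);
  for (size_t row = 0; row < out.size(); ++row)
    out[row].assign(column.begin() + row * n,         // same indexing as the
                    column.begin() + (row + 1) * n);  // non-Alt macro above
  return out;
}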
} // namespace lbcrypto
#endif // LBCRYPTO_MATH_MATRIX_H
|
kmerz_count.c | // This program is used to count the kmers (up to 64-mers) in a fasta/q dataset
extern "C" {
#ifndef __STDC_FORMAT_MACROS
#define __STDC_FORMAT_MACROS
#endif
#include "stdlib.h"
#include "inttypes.h"
#include "omp.h"
#include "clparsing.h"
#include "kmer.h"
#include "fasta.h"
#include "fastq.h"
#include "bloom_filter.h"
}
#include "sparse_hash.h"
#define chunk 1000000
const char* program_version_major = "0";
const char* program_version_minor = "1";
const char* program_revision_date = "11172014";
const char* program_name = "kmerz_count";
const char* program_description =
"count kmers in the input fastq/fasta dataset";
const char* program_use =
"kmerz_count [options] kmer_length max_kmers_exp genome_size file.fq/file.fa";
Bool debug_flag;
static void ProcessChunk(char** const chunkbases,
const int numseqs,
const uint kmer_length,
SparseHashMap& kmers)
{
int indx, tid;
Kmer word, antiword, stored;
std::vector<Kmer> array;
std::vector<Kmer>::iterator it;
#pragma omp for
for (indx = 0; indx < numseqs; indx += 1) {
// let's account for all the kmers in this sequence
word = BuildIndex(chunkbases[indx], kmer_length);
uint num_kmers = strlen(chunkbases[indx]) - kmer_length;
for (uint i = 0; i <= num_kmers; i++) {
word = GetNextKmer(word, chunkbases[indx],kmer_length,i);
antiword = ReverseComplementKmer(word, kmer_length);
stored = word < antiword ? word : antiword;
array.push_back(stored);
}
Ckfree(chunkbases[indx]);
}
#pragma omp critical
for (it = array.begin(); it != array.end(); ++it) {
if (CheckKmerInSparseHashMap(kmers, (*it)) == TRUE) {
if (kmers[(*it)] <= (umaxof(Kcount) - 1)) {
kmers[(*it)] += 1;
}
} else {
kmers[(*it)] = 1;
}
}
}
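// Why the loop above stores min(word, antiword): a kmer and its reverse
// complement name the same double-stranded sequence, so counting the smaller
// of the two encodings collapses both strands onto one canonical key. A
// minimal sketch of the reverse complement for a 2-bit packing (hypothetical;
// assumes A=0, C=1, G=2, T=3 -- the real encoding lives in kmer.h):
static inline uint64_t ReverseComplement2BitSketch(uint64_t word, uint32_t k) {
  uint64_t rc = 0;
  for (uint32_t i = 0; i < k; i++) {
    rc = (rc << 2) | (3ULL - (word & 3ULL));  // complement base, reverse order
    word >>= 2;
  }
  return rc;
}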
static void ProcessChunkFirstRound(char** const chunkbases,
const int numseqs,
const uint kmer_length,
BloomFilter* const bf,
SparseHashMap& kmers)
{
int indx, tid;
Kmer word, antiword, stored;
std::vector<Kmer> array;
std::vector<Kmer>::iterator it;
#pragma omp for
for (indx = 0; indx < numseqs; indx += 1) {
// let's account for all the kmers in this sequence
word = BuildIndex(chunkbases[indx], kmer_length);
uint num_kmers = strlen(chunkbases[indx]) - kmer_length;
for (uint i = 0; i <= num_kmers; i++) {
word = GetNextKmer(word, chunkbases[indx],kmer_length,i);
antiword = ReverseComplementKmer(word, kmer_length);
stored = word < antiword ? word : antiword;
if (CheckKmerInSparseHashMap(kmers, stored) == FALSE) {
if (CheckKmerInBloomFilter(bf, stored) == TRUE) {
array.push_back(stored);
} else {
if (!AddKmerToBloomFilter(bf, stored)) {
array.push_back(stored);
}
}
} else {
array.push_back(stored);
}
}
Ckfree(chunkbases[indx]);
}
#pragma omp critical
for (it = array.begin(); it != array.end(); ++it) {
kmers[(*it)] = 0;
}
}
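// The first round admits a kmer into the hash table only once it has been
// seen twice: the first sighting merely sets bits in the Bloom filter, so the
// typically huge population of sequencing-error singletons never consumes
// hash-table memory. The same flow with an exact set standing in for the
// probabilistic filter (hypothetical sketch; assumes <unordered_set> and
// <vector> are available):
static void FirstSightingSketch(const std::vector<uint64_t>& stream,
                                std::unordered_set<uint64_t>& seen_once,
                                std::unordered_set<uint64_t>& to_count) {
  for (size_t i = 0; i < stream.size(); i++) {
    if (!seen_once.insert(stream[i]).second)  // insert fails => seen before
      to_count.insert(stream[i]);             // second sighting: worth counting
  }
}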
static void ProcessChunkSecondRound(char** const chunkbases,
const int numseqs,
const uint kmer_length,
SparseHashMap& kmers)
{
int indx, tid;
Kmer word, antiword, stored;
std::vector<Kmer> array;
std::vector<Kmer>::iterator it;
#pragma omp for
for (indx = 0; indx < numseqs; indx += 1) {
// let's account for all the kmers in this sequence
word = BuildIndex(chunkbases[indx], kmer_length);
uint num_kmers = strlen(chunkbases[indx]) - kmer_length;
for (uint i = 0; i <= num_kmers; i++) {
word = GetNextKmer(word, chunkbases[indx],kmer_length,i);
antiword = ReverseComplementKmer(word, kmer_length);
stored = word < antiword ? word : antiword;
if (CheckKmerInSparseHashMap(kmers, stored) == TRUE) {
array.push_back(stored);
}
}
Ckfree(chunkbases[indx]);
}
#pragma omp critical
for (it = array.begin(); it != array.end(); ++it) {
if (kmers[(*it)] <= (umaxof(Kcount) - 1)) {
kmers[(*it)] += 1;
}
}
}
// count the kmers in the input file. Print the counts once you are done.
static void CountKmers(char* const* const file_names,
const int num_files,
const uint kmer_length,
const uint64_t num_expected_kmers,
const uint64_t expected_genome_size,
const int ploidy,
const Bool is_illumina_encoded,
const Bool do_trim,
const int progress_chunk,
const Bool is_fastq,
const int num_threads,
const Bool count_all) {
omp_set_num_threads(num_threads);
// this is the bloom filter to tag all the kmers that have been seen at
// least once.
BloomFilter* bf = NULL;
if (count_all == FALSE) {
bf = NewBloomFilter(0.001, num_expected_kmers, 0);
}
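// Sizing intuition: for false-positive rate p and n expected insertions, a
// standard Bloom filter needs m = -n*ln(p)/(ln 2)^2 bits and k = (m/n)*ln 2
// hash functions; at p = 0.001 that is roughly 14.4 bits and 10 hashes per
// kmer. (Whether bloom_filter.h uses exactly this formula is an assumption.)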
// this is the hash table I am going to use to store the counts
SparseHashMap kmers;
kmers.rehash(ploidy*expected_genome_size);
// gather a chunk of sequences so that we can use multiple threads to deal
// with them.
char** chunkbases = (char**)CkallocOrDie(chunk * sizeof(char*));
char* kmer_buffer = (char*)CkalloczOrDie(kmer_length + 1);
void* sequence;
uint64_t num_kmers_added = 0;
uint64_t num_sequence_processed = 0;
// go through all the files once to find the kmers
for (int findx = 0; findx < num_files; findx++) {
char* file_name = file_names[findx];
if (is_fastq) {
sequence = ReadFastqSequence(file_name,
is_illumina_encoded,
do_trim);
} else {
sequence = ReadFastaSequence(file_name);
}
uint bases_indx = 0;
while (sequence) {
uint slen;
char* name;
char* bases;
if (is_fastq) {
slen = ((FastqSequence*)sequence)->slen;
name = ((FastqSequence*)sequence)->name;
bases= ((FastqSequence*)sequence)->bases;
} else {
slen = ((FastaSequence*)sequence)->slen;
name = ((FastaSequence*)sequence)->header;
bases= ((FastaSequence*)sequence)->bases;
}
if (slen >= kmer_length) {
if (debug_flag == TRUE) {
PrintDebugMessage("1. Processing %s", name);
}
// print progress
num_sequence_processed += 1;
if ((num_sequence_processed - 1) % progress_chunk == 0) {
double frac = 0.0;
if (bf) frac = bf->num_set_bits * 1.0 / bf->num_bits;
PrintDebugMessage(
"1. Processing read %"PRIu64": %s, kmers: %zu, bf :%.5f",
num_sequence_processed, is_fastq ? name + 1 : name,
kmers.size(), frac);
}
// a load factor greater than 0.7-0.8 is a sign that the user did
// not select the expected number of kmers judiciously. Let's warn
// the user, as increasing the size of the hash table can be very
// slow.
if (kmers.load_factor() > 0.8) {
PrintWarning("Current load factor: %2.6f",
kmers.load_factor());
PrintWarning(
"Maybe try increasing expected genome size from %"PRIu64,
expected_genome_size);
}
chunkbases[bases_indx++] = CopyString(bases);
if ((bases_indx % chunk) == 0) {
#pragma omp parallel
{
if (count_all == TRUE) {
ProcessChunk(chunkbases,chunk,kmer_length,kmers);
} else {
ProcessChunkFirstRound(chunkbases,chunk,kmer_length,bf,kmers);
}
}
bases_indx = 0;
}
}
if (is_fastq) {
sequence = GetNextSequence((FastqSequence*)sequence);
} else {
sequence = GetNextFastaSequence((FastaSequence*)sequence);
}
}
#pragma omp parallel
{
if (count_all == TRUE) {
ProcessChunk(chunkbases, bases_indx, kmer_length, kmers);
} else {
ProcessChunkFirstRound(chunkbases, bases_indx, kmer_length, bf, kmers);
}
}
PrintDebugMessage("1. Done with all the sequences in %s", file_name);
}
PrintDebugMessage("1. Counted %zu different kmers", kmers.size());
ReportMemoryUsage();
if (is_fastq) CloseFastqSequence((FastqSequence*)sequence);
else CloseFastaSequence((FastaSequence*)sequence);
if (count_all == TRUE) {
goto printkmers;
} else {
FreeBloomFilter(&bf);
}
num_sequence_processed = 0;
// lets count a second time and update the counts
for (int findx = 0; findx < num_files; findx++) {
char* file_name = file_names[findx];
PrintDebugMessage("Reopening %s to extract correct counts", file_name);
if (is_fastq) {
sequence = ReadFastqSequence(file_name,
is_illumina_encoded,
do_trim);
} else {
sequence = ReadFastaSequence(file_name);
}
uint bases_indx = 0;
while (sequence) {
uint slen;
char* name;
char* bases;
if (is_fastq) {
slen = ((FastqSequence*)sequence)->slen;
name = ((FastqSequence*)sequence)->name;
bases= ((FastqSequence*)sequence)->bases;
} else {
slen = ((FastaSequence*)sequence)->slen;
name = ((FastaSequence*)sequence)->header;
bases= ((FastaSequence*)sequence)->bases;
}
if (slen >= kmer_length) {
if (debug_flag == TRUE) {
PrintDebugMessage("2. Processing %s", name);
}
// print progress
num_sequence_processed += 1;
if ((num_sequence_processed - 1) % progress_chunk == 0) {
// NB: bf was freed after the first round, so it must not be used here.
PrintDebugMessage(
"2. Processing read %" PRIu64 ": %s",
num_sequence_processed, is_fastq ? name + 1 : name);
}
chunkbases[bases_indx++] = CopyString(bases);
if ((bases_indx % chunk) == 0) {
#pragma omp parallel
{
ProcessChunkSecondRound(chunkbases,chunk,kmer_length,kmers);
}
bases_indx = 0;
}
}
if (is_fastq) {
sequence = GetNextSequence((FastqSequence*)sequence);
} else {
sequence = GetNextFastaSequence((FastaSequence*)sequence);
}
}
#pragma omp parallel
{
ProcessChunkSecondRound(chunkbases, bases_indx, kmer_length, kmers);
}
}
// now lets print the kmer counts
printkmers:
SparseHashMap::const_iterator iter;
for (iter = kmers.begin(); iter != kmers.end(); iter++) {
const Kmer tmp = (*iter).first;
ConvertKmerToString(tmp, kmer_length, &kmer_buffer);
Kcount val = (*iter).second;
if (sizeof(Kcount) == 1) {
if (count_all == TRUE) {
printf("%s %"PRIu8"\n", kmer_buffer, val);
} else {
if (val > 1) printf("%s %"PRIu8"\n", kmer_buffer, val);
}
} else if (sizeof(Kcount) == 2) {
if (count_all == TRUE) {
printf("%s %"PRIu16"\n", kmer_buffer, val);
} else {
if (val > 1) printf("%s %"PRIu16"\n", kmer_buffer, val);
}
} else {
PrintThenDie("This use-case has not been handled yet.");
}
}
Ckfree(chunkbases);
Ckfree(kmer_buffer);
}
int main(int argc, char** argv) {
// start clock book-keeping
t0 = time(0);
// parse the command line
CommandLineArguments* cl_options = NewCommandLineArguments();
// these are the valid options for the various commands
AddOption(&cl_options, "threads", "1", TRUE, TRUE,
"number of threads", NULL);
AddOption(&cl_options, "format", "fastq", TRUE, TRUE,
"format of input file (fastq/fasta)", NULL);
AddOption(&cl_options, "qformat", "sanger", TRUE, TRUE,
"encoding of quality (illumina/sanger).", NULL);
AddOption(&cl_options, "trim", "FALSE", FALSE, TRUE,
"should I trim the low quality 3' ends of the reads.", NULL);
AddOption(&cl_options, "progress", "100000", TRUE, TRUE,
"print progress every so many sequences", NULL);
AddOption(&cl_options, "ploidy", "2", TRUE, TRUE,
"ploidy for the species", NULL);
AddOption(&cl_options, "exact", "FALSE", FALSE, TRUE,
"count all kmers, including singletons", NULL);
ParseOptions(&cl_options, &argc, &argv);
// does the user just want some help
Bool print_help = GetOptionBoolValueOrDie(cl_options, "help");
if (print_help == TRUE) {
PrintSimpleUsageString(cl_options);
return EXIT_SUCCESS;
}
// does the user know the correct syntax
if (argc < 5) {
PrintSimpleUsageString(cl_options);
return EXIT_FAILURE;
}
uint kmer_length;
if (sscanf(argv[1], "%u", &kmer_length) != 1) {
#ifdef Large
PrintMessageThenDie("Kmer length should be an odd integer < 64: %s",
argv[1]);
#else
PrintMessageThenDie("Kmer length should be an odd integer < 32: %s",
argv[1]);
#endif
}
if (kmer_length % 2 == 0) {
PrintWarning("Kmer length should be an odd integer, using %u",
--kmer_length);
}
uint64_t num_expected_kmers;
if (sscanf(argv[2], "%" PRIu64, &num_expected_kmers) != 1) {
PrintMessageThenDie("Number of expected kmers should be an integer: %s",
argv[2]);
}
uint64_t expected_genome_size;
if (sscanf(argv[3], "%" PRIu64, &expected_genome_size) != 1) {
PrintMessageThenDie("Expected genome size should be an integer: %s",
argv[3]);
}
// do I need additional debug info
debug_flag = GetOptionBoolValueOrDie(cl_options, "debug");
// count the kmers in these sequences
Bool is_illumina_encoded = SameString(GetOptionStringValue(cl_options,
"qformat"),"illumina")
? TRUE : FALSE;
Bool do_trim = GetOptionBoolValueOrDie(cl_options, "trim");
int progress_chunk = GetOptionIntValueOrDie(cl_options, "progress");
Bool is_fastq = TRUE;
if (SameString(GetOptionStringValue(cl_options, "format"), "fasta")) {
is_fastq = FALSE;
}
int num_threads = GetOptionIntValueOrDie(cl_options, "threads");
Bool count_all = GetOptionBoolValueOrDie(cl_options, "exact");
int ploidy = GetOptionIntValueOrDie(cl_options, "ploidy");
//char* file_name = argv[4];
CountKmers(argv + 4,
argc - 4,
kmer_length,
num_expected_kmers,
expected_genome_size,
ploidy,
is_illumina_encoded,
do_trim,
progress_chunk,
is_fastq,
num_threads,
count_all);
FreeParseOptions(&cl_options, &argv);
return EXIT_SUCCESS;
}
|
max_threads.c | // RUN: %compile-run-and-check
#include <omp.h>
#include <stdio.h>
int main(int argc, char *argv[]) {
int MaxThreadsL1 = -1, MaxThreadsL2 = -1;
#pragma omp declare reduction(unique:int \
: omp_out = (omp_in == 1 ? omp_in : omp_out)) \
initializer(omp_priv = -1)
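// The custom "unique" reduction above combines per-thread values by keeping
// 1 if any contribution equals 1 (the initializer -1 marks "no value yet"),
// so the CHECK lines below expect 1 whenever some thread in the parallel
// region reports that no further nested parallelism is available.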
// Non-SPMD mode.
#pragma omp target teams map(MaxThreadsL1, MaxThreadsL2) thread_limit(32) \
num_teams(1)
{
MaxThreadsL1 = omp_get_max_threads();
#pragma omp parallel reduction(unique : MaxThreadsL2)
{ MaxThreadsL2 = omp_get_max_threads(); }
}
//FIXME: This Non-SPMD kernel will have 32 active threads due to
// thread_limit. However, Non-SPMD MaxThreadsL1 is the total number of
// threads in block (64 in this case), which translates to worker
// threads + WARP_SIZE for Non-SPMD kernels and worker threads for SPMD
// kernels. According to the spec, omp_get_max_threads must return the
// max active threads possible between the two kernel types.
// CHECK: Non-SPMD MaxThreadsL1 = 64
printf("Non-SPMD MaxThreadsL1 = %d\n", MaxThreadsL1);
// CHECK: Non-SPMD MaxThreadsL2 = 1
printf("Non-SPMD MaxThreadsL2 = %d\n", MaxThreadsL2);
// SPMD mode with full runtime
MaxThreadsL2 = -1;
#pragma omp target parallel reduction(unique : MaxThreadsL2)
{ MaxThreadsL2 = omp_get_max_threads(); }
// CHECK: SPMD with full runtime MaxThreadsL2 = 1
printf("SPMD with full runtime MaxThreadsL2 = %d\n", MaxThreadsL2);
// SPMD mode without runtime
MaxThreadsL2 = -1;
#pragma omp target parallel for reduction(unique : MaxThreadsL2)
for (int I = 0; I < 2; ++I) {
MaxThreadsL2 = omp_get_max_threads();
}
// CHECK: SPMD without runtime MaxThreadsL2 = 1
printf("SPMD without runtime MaxThreadsL2 = %d\n", MaxThreadsL2);
return 0;
}
|
Sema.h | //===--- Sema.h - Semantic Analysis & AST Building --------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the Sema class, which performs semantic analysis and
// builds ASTs.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_SEMA_SEMA_H
#define LLVM_CLANG_SEMA_SEMA_H
#include "clang/AST/ASTConcept.h"
#include "clang/AST/ASTFwd.h"
#include "clang/AST/Attr.h"
#include "clang/AST/Availability.h"
#include "clang/AST/ComparisonCategories.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/DeclarationName.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprConcepts.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/ExprOpenMP.h"
#include "clang/AST/ExternalASTSource.h"
#include "clang/AST/LocInfoType.h"
#include "clang/AST/MangleNumberingContext.h"
#include "clang/AST/NSAPI.h"
#include "clang/AST/PrettyPrinter.h"
#include "clang/AST/StmtCXX.h"
#include "clang/AST/StmtOpenMP.h"
#include "clang/AST/TypeLoc.h"
#include "clang/AST/TypeOrdering.h"
#include "clang/Basic/BitmaskEnum.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/DarwinSDKInfo.h"
#include "clang/Basic/DiagnosticSema.h"
#include "clang/Basic/ExpressionTraits.h"
#include "clang/Basic/Module.h"
#include "clang/Basic/OpenCLOptions.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/PragmaKinds.h"
#include "clang/Basic/Specifiers.h"
#include "clang/Basic/TemplateKinds.h"
#include "clang/Basic/TypeTraits.h"
#include "clang/Sema/AnalysisBasedWarnings.h"
#include "clang/Sema/CleanupInfo.h"
#include "clang/Sema/DeclSpec.h"
#include "clang/Sema/ExternalSemaSource.h"
#include "clang/Sema/IdentifierResolver.h"
#include "clang/Sema/ObjCMethodList.h"
#include "clang/Sema/Ownership.h"
#include "clang/Sema/Scope.h"
#include "clang/Sema/SemaConcept.h"
#include "clang/Sema/TypoCorrection.h"
#include "clang/Sema/Weak.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/TinyPtrVector.h"
#include "llvm/Frontend/OpenMP/OMPConstants.h"
#include <deque>
#include <memory>
#include <string>
#include <tuple>
#include <vector>
namespace llvm {
class APSInt;
template <typename ValueT, typename ValueInfoT> class DenseSet;
class SmallBitVector;
struct InlineAsmIdentifierInfo;
}
namespace clang {
class ADLResult;
class ASTConsumer;
class ASTContext;
class ASTMutationListener;
class ASTReader;
class ASTWriter;
class ArrayType;
class ParsedAttr;
class BindingDecl;
class BlockDecl;
class CapturedDecl;
class CXXBasePath;
class CXXBasePaths;
class CXXBindTemporaryExpr;
typedef SmallVector<CXXBaseSpecifier*, 4> CXXCastPath;
class CXXConstructorDecl;
class CXXConversionDecl;
class CXXDeleteExpr;
class CXXDestructorDecl;
class CXXFieldCollector;
class CXXMemberCallExpr;
class CXXMethodDecl;
class CXXScopeSpec;
class CXXTemporary;
class CXXTryStmt;
class CallExpr;
class ClassTemplateDecl;
class ClassTemplatePartialSpecializationDecl;
class ClassTemplateSpecializationDecl;
class VarTemplatePartialSpecializationDecl;
class CodeCompleteConsumer;
class CodeCompletionAllocator;
class CodeCompletionTUInfo;
class CodeCompletionResult;
class CoroutineBodyStmt;
class Decl;
class DeclAccessPair;
class DeclContext;
class DeclRefExpr;
class DeclaratorDecl;
class DeducedTemplateArgument;
class DependentDiagnostic;
class DesignatedInitExpr;
class Designation;
class EnableIfAttr;
class EnumConstantDecl;
class Expr;
class ExtVectorType;
class FormatAttr;
class FriendDecl;
class FunctionDecl;
class FunctionProtoType;
class FunctionTemplateDecl;
class ImplicitConversionSequence;
typedef MutableArrayRef<ImplicitConversionSequence> ConversionSequenceList;
class InitListExpr;
class InitializationKind;
class InitializationSequence;
class InitializedEntity;
class IntegerLiteral;
class LabelStmt;
class LambdaExpr;
class LangOptions;
class LocalInstantiationScope;
class LookupResult;
class MacroInfo;
typedef ArrayRef<std::pair<IdentifierInfo *, SourceLocation>> ModuleIdPath;
class ModuleLoader;
class MultiLevelTemplateArgumentList;
class NamedDecl;
class ObjCCategoryDecl;
class ObjCCategoryImplDecl;
class ObjCCompatibleAliasDecl;
class ObjCContainerDecl;
class ObjCImplDecl;
class ObjCImplementationDecl;
class ObjCInterfaceDecl;
class ObjCIvarDecl;
template <class T> class ObjCList;
class ObjCMessageExpr;
class ObjCMethodDecl;
class ObjCPropertyDecl;
class ObjCProtocolDecl;
class OMPThreadPrivateDecl;
class OMPRequiresDecl;
class OMPDeclareReductionDecl;
class OMPDeclareSimdDecl;
class OMPClause;
struct OMPVarListLocTy;
struct OverloadCandidate;
enum class OverloadCandidateParamOrder : char;
enum OverloadCandidateRewriteKind : unsigned;
class OverloadCandidateSet;
class OverloadExpr;
class ParenListExpr;
class ParmVarDecl;
class Preprocessor;
class PseudoDestructorTypeStorage;
class PseudoObjectExpr;
class QualType;
class StandardConversionSequence;
class Stmt;
class StringLiteral;
class SwitchStmt;
class TemplateArgument;
class TemplateArgumentList;
class TemplateArgumentLoc;
class TemplateDecl;
class TemplateInstantiationCallback;
class TemplateParameterList;
class TemplatePartialOrderingContext;
class TemplateTemplateParmDecl;
class Token;
class TypeAliasDecl;
class TypedefDecl;
class TypedefNameDecl;
class TypeLoc;
class TypoCorrectionConsumer;
class UnqualifiedId;
class UnresolvedLookupExpr;
class UnresolvedMemberExpr;
class UnresolvedSetImpl;
class UnresolvedSetIterator;
class UsingDecl;
class UsingShadowDecl;
class ValueDecl;
class VarDecl;
class VarTemplateSpecializationDecl;
class VisibilityAttr;
class VisibleDeclConsumer;
class IndirectFieldDecl;
struct DeductionFailureInfo;
class TemplateSpecCandidateSet;
namespace sema {
class AccessedEntity;
class BlockScopeInfo;
class Capture;
class CapturedRegionScopeInfo;
class CapturingScopeInfo;
class CompoundScopeInfo;
class DelayedDiagnostic;
class DelayedDiagnosticPool;
class FunctionScopeInfo;
class LambdaScopeInfo;
class PossiblyUnreachableDiag;
class SemaPPCallbacks;
class TemplateDeductionInfo;
}
namespace threadSafety {
class BeforeSet;
void threadSafetyCleanup(BeforeSet* Cache);
}
// FIXME: No way to easily map from TemplateTypeParmTypes to
// TemplateTypeParmDecls, so we have this horrible PointerUnion.
typedef std::pair<llvm::PointerUnion<const TemplateTypeParmType*, NamedDecl*>,
SourceLocation> UnexpandedParameterPack;
/// Describes whether we've seen any nullability information for the given
/// file.
struct FileNullability {
/// The first pointer declarator (of any pointer kind) in the file that does
/// not have a corresponding nullability annotation.
SourceLocation PointerLoc;
/// The end location for the first pointer declarator in the file. Used for
/// placing fix-its.
SourceLocation PointerEndLoc;
/// Which kind of pointer declarator we saw.
uint8_t PointerKind;
/// Whether we saw any type nullability annotations in the given file.
bool SawTypeNullability = false;
};
/// A mapping from file IDs to a record of whether we've seen nullability
/// information in that file.
class FileNullabilityMap {
/// A mapping from file IDs to the nullability information for each file ID.
llvm::DenseMap<FileID, FileNullability> Map;
/// A single-element cache based on the file ID.
struct {
FileID File;
FileNullability Nullability;
} Cache;
public:
FileNullability &operator[](FileID file) {
// Check the single-element cache.
if (file == Cache.File)
return Cache.Nullability;
// It's not in the single-element cache; flush the cache if we have one.
if (!Cache.File.isInvalid()) {
Map[Cache.File] = Cache.Nullability;
}
// Pull this entry into the cache.
Cache.File = file;
Cache.Nullability = Map[file];
return Cache.Nullability;
}
};
// TODO SYCL Integration header approach relies on an assumption that kernel
// lambda objects created by the host compiler and any of the device compilers
// will be identical with respect to field types, order, and offsets. Some verification
// mechanism should be developed to enforce that.
// TODO FIXME SYCL Support for SYCL in FE should be refactored:
// - kernel identification and generation should be made a separate pass over
// AST. RecursiveASTVisitor + VisitFunctionTemplateDecl +
// FunctionTemplateDecl::getSpecializations() mechanism could be used for that.
// - All SYCL stuff on Sema level should be encapsulated into a single Sema
// field
// - Move SYCL stuff into a separate header
// Represents contents of a SYCL integration header file produced by a SYCL
// device compiler and used by SYCL host compiler (via forced inclusion into
// compiled SYCL source):
// - SYCL kernel names
// - SYCL kernel parameters and offsets of corresponding actual arguments
class SYCLIntegrationHeader {
public:
// Kind of kernel's parameters as captured by the compiler in the
// kernel lambda or function object
enum kernel_param_kind_t {
kind_first,
kind_accessor = kind_first,
kind_std_layout,
kind_sampler,
kind_pointer,
kind_specialization_constants_buffer,
kind_stream,
kind_last = kind_stream
};
public:
SYCLIntegrationHeader(Sema &S);
/// Emits contents of the header into given stream.
void emit(raw_ostream &Out);
/// Emits contents of the header into a file with given name.
/// Returns true/false on success/failure.
bool emit(StringRef MainSrc);
/// Signals that subsequent parameter descriptor additions will go to
/// the kernel with given name. Starts new kernel invocation descriptor.
void startKernel(const FunctionDecl *SyclKernel, QualType KernelNameType,
SourceLocation Loc, bool IsESIMD, bool IsUnnamedKernel);
/// Adds a kernel parameter descriptor to current kernel invocation
/// descriptor.
void addParamDesc(kernel_param_kind_t Kind, int Info, unsigned Offset);
/// Signals that addition of parameter descriptors to current kernel
/// invocation descriptor has finished.
void endKernel();
/// Registers a specialization constant to emit info for it into the header.
void addSpecConstant(StringRef IDName, QualType IDType);
/// Update the names of a kernel description based on its SyclKernel.
void updateKernelNames(const FunctionDecl *SyclKernel, StringRef Name,
StringRef StableName) {
auto Itr = llvm::find_if(KernelDescs, [SyclKernel](const KernelDesc &KD) {
return KD.SyclKernel == SyclKernel;
});
assert(Itr != KernelDescs.end() && "Unknown kernel description");
Itr->updateKernelNames(Name, StableName);
}
private:
// Kernel actual parameter descriptor.
struct KernelParamDesc {
// Represents a parameter kind.
kernel_param_kind_t Kind = kind_last;
// If Kind is kind_scalar or kind_struct, then
// denotes parameter size in bytes (includes padding for structs)
// If Kind is kind_accessor
// denotes access target; possible access targets are defined in
// access/access.hpp
int Info = 0;
// Offset of the captured parameter value in the lambda or function object.
unsigned Offset = 0;
KernelParamDesc() = default;
};
// Kernel invocation descriptor
struct KernelDesc {
/// sycl_kernel function associated with this kernel.
const FunctionDecl *SyclKernel;
/// Kernel name.
std::string Name;
/// Kernel name type.
QualType NameType;
/// Kernel name with stable lambda name mangling
std::string StableName;
SourceLocation KernelLocation;
/// Whether this kernel is an ESIMD one.
bool IsESIMDKernel;
/// Descriptor of kernel actual parameters.
SmallVector<KernelParamDesc, 8> Params;
// If we are in unnamed kernel/lambda mode AND this is one that the user
// hasn't provided an explicit name for.
bool IsUnnamedKernel;
KernelDesc(const FunctionDecl *SyclKernel, QualType NameType,
SourceLocation KernelLoc, bool IsESIMD, bool IsUnnamedKernel)
: SyclKernel(SyclKernel), NameType(NameType), KernelLocation(KernelLoc),
IsESIMDKernel(IsESIMD), IsUnnamedKernel(IsUnnamedKernel) {}
void updateKernelNames(StringRef Name, StringRef StableName) {
this->Name = Name.str();
this->StableName = StableName.str();
}
};
/// Returns the latest invocation descriptor started by
/// SYCLIntegrationHeader::startKernel
KernelDesc *getCurKernelDesc() {
return KernelDescs.size() > 0 ? &KernelDescs[KernelDescs.size() - 1]
: nullptr;
}
private:
/// Keeps invocation descriptors for each kernel invocation started by
/// SYCLIntegrationHeader::startKernel
SmallVector<KernelDesc, 4> KernelDescs;
using SpecConstID = std::pair<QualType, std::string>;
/// Keeps specialization constants met in the translation unit. Maps spec
/// constant's ID type to generated unique name. Duplicates are removed at
/// integration header emission time.
llvm::SmallVector<SpecConstID, 4> SpecConsts;
Sema &S;
};
class SYCLIntegrationFooter {
public:
SYCLIntegrationFooter(Sema &S) : S(S) {}
bool emit(StringRef MainSrc);
void addVarDecl(const VarDecl *VD);
private:
bool emit(raw_ostream &O);
Sema &S;
llvm::SmallVector<const VarDecl *> SpecConstants;
void emitSpecIDName(raw_ostream &O, const VarDecl *VD);
};
/// Tracks expected type during expression parsing, for use in code completion.
/// The type is tied to a particular token, all functions that update or consume
/// the type take a start location of the token they are looking at as a
/// parameter. This avoids updating the type on hot paths in the parser.
class PreferredTypeBuilder {
public:
PreferredTypeBuilder(bool Enabled) : Enabled(Enabled) {}
void enterCondition(Sema &S, SourceLocation Tok);
void enterReturn(Sema &S, SourceLocation Tok);
void enterVariableInit(SourceLocation Tok, Decl *D);
/// Handles e.g. BaseType{ .D = Tok...
void enterDesignatedInitializer(SourceLocation Tok, QualType BaseType,
const Designation &D);
/// Computing a type for the function argument may require running
/// overloading, so we postpone its computation until it is actually needed.
///
/// Clients should be very careful when using this function, as it stores a
/// function_ref, clients should make sure all calls to get() with the same
/// location happen while function_ref is alive.
///
/// The callback should also emit signature help as a side-effect, but only
/// if the completion point has been reached.
void enterFunctionArgument(SourceLocation Tok,
llvm::function_ref<QualType()> ComputeType);
void enterParenExpr(SourceLocation Tok, SourceLocation LParLoc);
void enterUnary(Sema &S, SourceLocation Tok, tok::TokenKind OpKind,
SourceLocation OpLoc);
void enterBinary(Sema &S, SourceLocation Tok, Expr *LHS, tok::TokenKind Op);
void enterMemAccess(Sema &S, SourceLocation Tok, Expr *Base);
void enterSubscript(Sema &S, SourceLocation Tok, Expr *LHS);
/// Handles all type casts, including C-style cast, C++ casts, etc.
void enterTypeCast(SourceLocation Tok, QualType CastType);
/// Get the expected type associated with this location, if any.
///
/// If the location is a function argument, determining the expected type
/// involves considering all function overloads and the arguments so far.
/// In this case, signature help for these function overloads will be reported
/// as a side-effect (only if the completion point has been reached).
QualType get(SourceLocation Tok) const {
if (!Enabled || Tok != ExpectedLoc)
return QualType();
if (!Type.isNull())
return Type;
if (ComputeType)
return ComputeType();
return QualType();
}
private:
bool Enabled;
/// Start position of a token for which we store expected type.
SourceLocation ExpectedLoc;
/// Expected type for a token starting at ExpectedLoc.
QualType Type;
/// A function to compute expected type at ExpectedLoc. It is only considered
/// if Type is null.
llvm::function_ref<QualType()> ComputeType;
};
/// Sema - This implements semantic analysis and AST building for C.
class Sema final {
Sema(const Sema &) = delete;
void operator=(const Sema &) = delete;
///Source of additional semantic information.
ExternalSemaSource *ExternalSource;
///Whether Sema has generated a multiplexer and has to delete it.
bool isMultiplexExternalSource;
static bool mightHaveNonExternalLinkage(const DeclaratorDecl *FD);
bool isVisibleSlow(const NamedDecl *D);
/// Determine whether two declarations should be linked together, given that
/// the old declaration might not be visible and the new declaration might
/// not have external linkage.
bool shouldLinkPossiblyHiddenDecl(const NamedDecl *Old,
const NamedDecl *New) {
if (isVisible(Old))
return true;
// See comment in below overload for why it's safe to compute the linkage
// of the new declaration here.
if (New->isExternallyDeclarable()) {
assert(Old->isExternallyDeclarable() &&
"should not have found a non-externally-declarable previous decl");
return true;
}
return false;
}
bool shouldLinkPossiblyHiddenDecl(LookupResult &Old, const NamedDecl *New);
void setupImplicitSpecialMemberType(CXXMethodDecl *SpecialMem,
QualType ResultTy,
ArrayRef<QualType> Args);
public:
/// The maximum alignment, same as in llvm::Value. We duplicate them here
/// because that allows us not to duplicate the constants in clang code,
/// which we would otherwise have to do since we can't directly use the
/// llvm constants.
/// The value is verified against llvm here: lib/CodeGen/CGDecl.cpp
///
/// This is the greatest alignment value supported by load, store, and alloca
/// instructions, and global values.
static const unsigned MaxAlignmentExponent = 32;
static const uint64_t MaximumAlignment = 1ull << MaxAlignmentExponent;
typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy;
typedef OpaquePtr<TemplateName> TemplateTy;
typedef OpaquePtr<QualType> TypeTy;
OpenCLOptions OpenCLFeatures;
FPOptions CurFPFeatures;
const LangOptions &LangOpts;
Preprocessor &PP;
ASTContext &Context;
ASTConsumer &Consumer;
DiagnosticsEngine &Diags;
SourceManager &SourceMgr;
/// Flag indicating whether or not to collect detailed statistics.
bool CollectStats;
/// Code-completion consumer.
CodeCompleteConsumer *CodeCompleter;
/// CurContext - This is the current declaration context of parsing.
DeclContext *CurContext;
/// Generally null except when we temporarily switch decl contexts,
/// like in \see ActOnObjCTemporaryExitContainerContext.
DeclContext *OriginalLexicalContext;
/// VAListTagName - The declaration name corresponding to __va_list_tag.
/// This is used as part of a hack to omit that class from ADL results.
DeclarationName VAListTagName;
bool MSStructPragmaOn; // True when \#pragma ms_struct on
/// Controls member pointer representation format under the MS ABI.
LangOptions::PragmaMSPointersToMembersKind
MSPointerToMemberRepresentationMethod;
/// Stack of active SEH __finally scopes. Can be empty.
SmallVector<Scope*, 2> CurrentSEHFinally;
/// Source location for newly created implicit MSInheritanceAttrs
SourceLocation ImplicitMSInheritanceAttrLoc;
/// Holds TypoExprs that are created from `createDelayedTypo`. This is used by
/// `TransformTypos` in order to keep track of any TypoExprs that are created
/// recursively during typo correction and wipe them away if the correction
/// fails.
llvm::SmallVector<TypoExpr *, 2> TypoExprs;
/// pragma clang section kind
enum PragmaClangSectionKind {
PCSK_Invalid = 0,
PCSK_BSS = 1,
PCSK_Data = 2,
PCSK_Rodata = 3,
PCSK_Text = 4,
PCSK_Relro = 5
};
enum PragmaClangSectionAction {
PCSA_Set = 0,
PCSA_Clear = 1
};
struct PragmaClangSection {
std::string SectionName;
bool Valid = false;
SourceLocation PragmaLocation;
};
PragmaClangSection PragmaClangBSSSection;
PragmaClangSection PragmaClangDataSection;
PragmaClangSection PragmaClangRodataSection;
PragmaClangSection PragmaClangRelroSection;
PragmaClangSection PragmaClangTextSection;
enum PragmaMsStackAction {
PSK_Reset = 0x0, // #pragma ()
PSK_Set = 0x1, // #pragma (value)
PSK_Push = 0x2, // #pragma (push[, id])
PSK_Pop = 0x4, // #pragma (pop[, id])
PSK_Show = 0x8, // #pragma (show) -- only for "pack"!
PSK_Push_Set = PSK_Push | PSK_Set, // #pragma (push[, id], value)
PSK_Pop_Set = PSK_Pop | PSK_Set, // #pragma (pop[, id], value)
};
// #pragma pack and align.
class AlignPackInfo {
public:
// `Native` represents default align mode, which may vary based on the
// platform.
enum Mode : unsigned char { Native, Natural, Packed, Mac68k };
// #pragma pack info constructor
AlignPackInfo(AlignPackInfo::Mode M, unsigned Num, bool IsXL)
: PackAttr(true), AlignMode(M), PackNumber(Num), XLStack(IsXL) {
assert(Num == PackNumber && "The pack number has been truncated.");
}
// #pragma align info constructor
AlignPackInfo(AlignPackInfo::Mode M, bool IsXL)
: PackAttr(false), AlignMode(M),
PackNumber(M == Packed ? 1 : UninitPackVal), XLStack(IsXL) {}
explicit AlignPackInfo(bool IsXL) : AlignPackInfo(Native, IsXL) {}
AlignPackInfo() : AlignPackInfo(Native, false) {}
// When an AlignPackInfo itself cannot be used, this returns a 32-bit
// integer encoding for it. This should only be passed to
// AlignPackInfo::getFromRawEncoding; it should not be inspected directly.
static uint32_t getRawEncoding(const AlignPackInfo &Info) {
std::uint32_t Encoding{};
if (Info.IsXLStack())
Encoding |= IsXLMask;
Encoding |= static_cast<uint32_t>(Info.getAlignMode()) << 1;
if (Info.IsPackAttr())
Encoding |= PackAttrMask;
Encoding |= static_cast<uint32_t>(Info.getPackNumber()) << 4;
return Encoding;
}
static AlignPackInfo getFromRawEncoding(unsigned Encoding) {
bool IsXL = static_cast<bool>(Encoding & IsXLMask);
AlignPackInfo::Mode M =
static_cast<AlignPackInfo::Mode>((Encoding & AlignModeMask) >> 1);
int PackNumber = (Encoding & PackNumMask) >> 4;
if (Encoding & PackAttrMask)
return AlignPackInfo(M, PackNumber, IsXL);
return AlignPackInfo(M, IsXL);
}
bool IsPackAttr() const { return PackAttr; }
bool IsAlignAttr() const { return !PackAttr; }
Mode getAlignMode() const { return AlignMode; }
unsigned getPackNumber() const { return PackNumber; }
bool IsPackSet() const {
// #pragma align, #pragma pack(), and #pragma pack(0) do not set the pack
// attribute on a decl.
return PackNumber != UninitPackVal && PackNumber != 0;
}
bool IsXLStack() const { return XLStack; }
bool operator==(const AlignPackInfo &Info) const {
return std::tie(AlignMode, PackNumber, PackAttr, XLStack) ==
std::tie(Info.AlignMode, Info.PackNumber, Info.PackAttr,
Info.XLStack);
}
bool operator!=(const AlignPackInfo &Info) const {
return !(*this == Info);
}
private:
/// \brief True if this is a pragma pack attribute,
/// not a pragma align attribute.
bool PackAttr;
/// \brief The alignment mode that is in effect.
Mode AlignMode;
/// \brief The pack number of the stack.
unsigned char PackNumber;
/// \brief True if it is a XL #pragma align/pack stack.
bool XLStack;
/// \brief Uninitialized pack value.
static constexpr unsigned char UninitPackVal = -1;
// Masks to encode and decode an AlignPackInfo.
static constexpr uint32_t IsXLMask{0x0000'0001};
static constexpr uint32_t AlignModeMask{0x0000'0006};
static constexpr uint32_t PackAttrMask{0x0000'0008};
static constexpr uint32_t PackNumMask{0x0000'01F0};
};
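// Worked example of the raw encoding above (illustrative): bit 0 = IsXL,
// bits 1-2 = Mode, bit 3 = PackAttr, bits 4-8 = PackNumber. For
// #pragma pack(8) on a non-XL target (PackAttr = 1, Mode = Native = 0,
// PackNumber = 8):
//   Encoding = 0x0 | (0 << 1) | 0x8 | (8 << 4) = 0x88
// and getFromRawEncoding(0x88) reconstructs the same AlignPackInfo.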
template<typename ValueType>
struct PragmaStack {
struct Slot {
llvm::StringRef StackSlotLabel;
ValueType Value;
SourceLocation PragmaLocation;
SourceLocation PragmaPushLocation;
Slot(llvm::StringRef StackSlotLabel, ValueType Value,
SourceLocation PragmaLocation, SourceLocation PragmaPushLocation)
: StackSlotLabel(StackSlotLabel), Value(Value),
PragmaLocation(PragmaLocation),
PragmaPushLocation(PragmaPushLocation) {}
};
void Act(SourceLocation PragmaLocation, PragmaMsStackAction Action,
llvm::StringRef StackSlotLabel, ValueType Value) {
if (Action == PSK_Reset) {
CurrentValue = DefaultValue;
CurrentPragmaLocation = PragmaLocation;
return;
}
if (Action & PSK_Push)
Stack.emplace_back(StackSlotLabel, CurrentValue, CurrentPragmaLocation,
PragmaLocation);
else if (Action & PSK_Pop) {
if (!StackSlotLabel.empty()) {
// If we've got a label, try to find it and jump there.
auto I = llvm::find_if(llvm::reverse(Stack), [&](const Slot &x) {
return x.StackSlotLabel == StackSlotLabel;
});
// If we found the label, pop from there.
if (I != Stack.rend()) {
CurrentValue = I->Value;
CurrentPragmaLocation = I->PragmaLocation;
Stack.erase(std::prev(I.base()), Stack.end());
}
} else if (!Stack.empty()) {
// We do not have a label, just pop the last entry.
CurrentValue = Stack.back().Value;
CurrentPragmaLocation = Stack.back().PragmaLocation;
Stack.pop_back();
}
}
if (Action & PSK_Set) {
CurrentValue = Value;
CurrentPragmaLocation = PragmaLocation;
}
}
// MSVC seems to add artificial slots to #pragma stacks on entering a C++
// method body to restore the stacks on exit, so it works like this:
//
// struct S {
// #pragma <name>(push, InternalPragmaSlot, <current_pragma_value>)
// void Method {}
// #pragma <name>(pop, InternalPragmaSlot)
// };
//
// It works even with #pragma vtordisp, although MSVC doesn't support
// #pragma vtordisp(push [, id], n)
// syntax.
//
// Push / pop a named sentinel slot.
void SentinelAction(PragmaMsStackAction Action, StringRef Label) {
assert((Action == PSK_Push || Action == PSK_Pop) &&
"Can only push / pop #pragma stack sentinels!");
Act(CurrentPragmaLocation, Action, Label, CurrentValue);
}
// Constructors.
explicit PragmaStack(const ValueType &Default)
: DefaultValue(Default), CurrentValue(Default) {}
bool hasValue() const { return CurrentValue != DefaultValue; }
SmallVector<Slot, 2> Stack;
ValueType DefaultValue; // Value used for PSK_Reset action.
ValueType CurrentValue;
SourceLocation CurrentPragmaLocation;
};
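// Illustrative mapping (hypothetical label and values) from source-level
// pragmas to the Act() calls above, e.g. for a pack-like stack:
//   #pragma pack(push, lbl, 8) -> Act(Loc, PSK_Push_Set, "lbl", EightValue);
//   #pragma pack(4)            -> Act(Loc, PSK_Set, "", FourValue);
//   #pragma pack(pop, lbl)     -> Act(Loc, PSK_Pop, "lbl", ValueType());
//   #pragma pack()             -> Act(Loc, PSK_Reset, "", ValueType());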
// FIXME: We should serialize / deserialize these if they occur in a PCH (but
// we shouldn't do so if they're in a module).
/// Whether to insert vtordisps prior to virtual bases in the Microsoft
/// C++ ABI. Possible values are 0, 1, and 2, which mean:
///
/// 0: Suppress all vtordisps
/// 1: Insert vtordisps in the presence of vbase overrides and non-trivial
/// structors
/// 2: Always insert vtordisps to support RTTI on partially constructed
/// objects
PragmaStack<MSVtorDispMode> VtorDispStack;
PragmaStack<AlignPackInfo> AlignPackStack;
// The current #pragma align/pack values and locations at each #include.
struct AlignPackIncludeState {
AlignPackInfo CurrentValue;
SourceLocation CurrentPragmaLocation;
bool HasNonDefaultValue, ShouldWarnOnInclude;
};
SmallVector<AlignPackIncludeState, 8> AlignPackIncludeStack;
// Segment #pragmas.
PragmaStack<StringLiteral *> DataSegStack;
PragmaStack<StringLiteral *> BSSSegStack;
PragmaStack<StringLiteral *> ConstSegStack;
PragmaStack<StringLiteral *> CodeSegStack;
// This stack tracks the current state of Sema.CurFPFeatures.
PragmaStack<FPOptionsOverride> FpPragmaStack;
FPOptionsOverride CurFPFeatureOverrides() {
FPOptionsOverride result;
if (!FpPragmaStack.hasValue()) {
result = FPOptionsOverride();
} else {
result = FpPragmaStack.CurrentValue;
}
return result;
}
// RAII object to push / pop sentinel slots for all MS #pragma stacks.
// Actions should be performed only if we enter / exit a C++ method body.
class PragmaStackSentinelRAII {
public:
PragmaStackSentinelRAII(Sema &S, StringRef SlotLabel, bool ShouldAct);
~PragmaStackSentinelRAII();
private:
Sema &S;
StringRef SlotLabel;
bool ShouldAct;
};
/// A mapping that describes the nullability we've seen in each header file.
FileNullabilityMap NullabilityMap;
/// Last section used with #pragma init_seg.
StringLiteral *CurInitSeg;
SourceLocation CurInitSegLoc;
/// VisContext - Manages the stack for \#pragma GCC visibility.
void *VisContext; // Really a "PragmaVisStack*"
/// This an attribute introduced by \#pragma clang attribute.
struct PragmaAttributeEntry {
SourceLocation Loc;
ParsedAttr *Attribute;
SmallVector<attr::SubjectMatchRule, 4> MatchRules;
bool IsUsed;
};
/// A push'd group of PragmaAttributeEntries.
struct PragmaAttributeGroup {
/// The location of the push attribute.
SourceLocation Loc;
/// The namespace of this push group.
const IdentifierInfo *Namespace;
SmallVector<PragmaAttributeEntry, 2> Entries;
};
SmallVector<PragmaAttributeGroup, 2> PragmaAttributeStack;
/// The declaration that is currently receiving an attribute from the
/// #pragma attribute stack.
const Decl *PragmaAttributeCurrentTargetDecl;
/// This represents the last location of a "#pragma clang optimize off"
/// directive if such a directive has not been closed by an "on" yet. If
/// optimizations are currently "on", this is set to an invalid location.
SourceLocation OptimizeOffPragmaLocation;
/// Flag indicating if Sema is building a recovery call expression.
///
/// This flag is used to avoid building recovery call expressions
/// if Sema is already doing so, which would cause infinite recursions.
bool IsBuildingRecoveryCallExpr;
/// Used to control the generation of ExprWithCleanups.
CleanupInfo Cleanup;
/// ExprCleanupObjects - This is the stack of objects requiring
/// cleanup that are created by the current full expression.
SmallVector<ExprWithCleanups::CleanupObject, 8> ExprCleanupObjects;
/// Store a set of either DeclRefExprs or MemberExprs that contain a reference
/// to a variable (constant) that may or may not be odr-used in this Expr, and
/// we won't know until all lvalue-to-rvalue and discarded value conversions
/// have been applied to all subexpressions of the enclosing full expression.
/// This is cleared at the end of each full expression.
using MaybeODRUseExprSet = llvm::SetVector<Expr *, SmallVector<Expr *, 4>,
llvm::SmallPtrSet<Expr *, 4>>;
MaybeODRUseExprSet MaybeODRUseExprs;
std::unique_ptr<sema::FunctionScopeInfo> CachedFunctionScope;
/// Stack containing information about each of the nested
/// function, block, and method scopes that are currently active.
SmallVector<sema::FunctionScopeInfo *, 4> FunctionScopes;
/// The index of the first FunctionScope that corresponds to the current
/// context.
unsigned FunctionScopesStart = 0;
ArrayRef<sema::FunctionScopeInfo*> getFunctionScopes() const {
return llvm::makeArrayRef(FunctionScopes.begin() + FunctionScopesStart,
FunctionScopes.end());
}
/// Stack containing information needed when in C++2a an 'auto' is encountered
/// in a function declaration parameter type specifier in order to invent a
/// corresponding template parameter in the enclosing abbreviated function
/// template. This information is also present in LambdaScopeInfo, stored in
/// the FunctionScopes stack.
SmallVector<InventedTemplateParameterInfo, 4> InventedParameterInfos;
/// The index of the first InventedParameterInfo that refers to the current
/// context.
unsigned InventedParameterInfosStart = 0;
ArrayRef<InventedTemplateParameterInfo> getInventedParameterInfos() const {
return llvm::makeArrayRef(InventedParameterInfos.begin() +
InventedParameterInfosStart,
InventedParameterInfos.end());
}
typedef LazyVector<TypedefNameDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadExtVectorDecls, 2, 2>
ExtVectorDeclsType;
/// ExtVectorDecls - This is a list all the extended vector types. This allows
/// us to associate a raw vector type with one of the ext_vector type names.
/// This is only necessary for issuing pretty diagnostics.
ExtVectorDeclsType ExtVectorDecls;
/// FieldCollector - Collects CXXFieldDecls during parsing of C++ classes.
std::unique_ptr<CXXFieldCollector> FieldCollector;
typedef llvm::SmallSetVector<NamedDecl *, 16> NamedDeclSetType;
/// Set containing all declared private fields that are not used.
NamedDeclSetType UnusedPrivateFields;
/// Set containing all typedefs that are likely unused.
llvm::SmallSetVector<const TypedefNameDecl *, 4>
UnusedLocalTypedefNameCandidates;
/// Delete-expressions to be analyzed at the end of translation unit
///
/// This list contains class members, and locations of delete-expressions
/// that could not be proven as to whether they mismatch with new-expression
/// used in initializer of the field.
typedef std::pair<SourceLocation, bool> DeleteExprLoc;
typedef llvm::SmallVector<DeleteExprLoc, 4> DeleteLocs;
llvm::MapVector<FieldDecl *, DeleteLocs> DeleteExprs;
typedef llvm::SmallPtrSet<const CXXRecordDecl*, 8> RecordDeclSetTy;
/// PureVirtualClassDiagSet - a set of class declarations which we have
/// emitted a list of pure virtual functions. Used to prevent emitting the
/// same list more than once.
std::unique_ptr<RecordDeclSetTy> PureVirtualClassDiagSet;
/// ParsingInitForAutoVars - a set of declarations with auto types for which
/// we are currently parsing the initializer.
llvm::SmallPtrSet<const Decl*, 4> ParsingInitForAutoVars;
/// Look for a locally scoped extern "C" declaration by the given name.
NamedDecl *findLocallyScopedExternCDecl(DeclarationName Name);
typedef LazyVector<VarDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadTentativeDefinitions, 2, 2>
TentativeDefinitionsType;
/// All the tentative definitions encountered in the TU.
TentativeDefinitionsType TentativeDefinitions;
/// All the external declarations encountered and used in the TU.
SmallVector<VarDecl *, 4> ExternalDeclarations;
typedef LazyVector<const DeclaratorDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadUnusedFileScopedDecls, 2, 2>
UnusedFileScopedDeclsType;
/// The set of file scoped decls seen so far that have not been used
/// and must warn if not used. Only contains the first declaration.
UnusedFileScopedDeclsType UnusedFileScopedDecls;
typedef LazyVector<CXXConstructorDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadDelegatingConstructors, 2, 2>
DelegatingCtorDeclsType;
/// All the delegating constructors seen so far in the file, used for
/// cycle detection at the end of the TU.
DelegatingCtorDeclsType DelegatingCtorDecls;
/// All the overriding functions seen during a class definition
/// that had their exception spec checks delayed, plus the overridden
/// function.
SmallVector<std::pair<const CXXMethodDecl*, const CXXMethodDecl*>, 2>
DelayedOverridingExceptionSpecChecks;
/// All the function redeclarations seen during a class definition that had
/// their exception spec checks delayed, plus the prior declaration they
/// should be checked against. Except during error recovery, the new decl
/// should always be a friend declaration, as that's the only valid way to
/// redeclare a special member before its class is complete.
SmallVector<std::pair<FunctionDecl*, FunctionDecl*>, 2>
DelayedEquivalentExceptionSpecChecks;
typedef llvm::MapVector<const FunctionDecl *,
std::unique_ptr<LateParsedTemplate>>
LateParsedTemplateMapT;
LateParsedTemplateMapT LateParsedTemplateMap;
/// Callback to the parser to parse templated functions when needed.
typedef void LateTemplateParserCB(void *P, LateParsedTemplate &LPT);
typedef void LateTemplateParserCleanupCB(void *P);
LateTemplateParserCB *LateTemplateParser;
LateTemplateParserCleanupCB *LateTemplateParserCleanup;
void *OpaqueParser;
void SetLateTemplateParser(LateTemplateParserCB *LTP,
LateTemplateParserCleanupCB *LTPCleanup,
void *P) {
LateTemplateParser = LTP;
LateTemplateParserCleanup = LTPCleanup;
OpaqueParser = P;
}
class DelayedDiagnostics;
class DelayedDiagnosticsState {
sema::DelayedDiagnosticPool *SavedPool;
friend class Sema::DelayedDiagnostics;
};
typedef DelayedDiagnosticsState ParsingDeclState;
typedef DelayedDiagnosticsState ProcessingContextState;
/// A class which encapsulates the logic for delaying diagnostics
/// during parsing and other processing.
class DelayedDiagnostics {
/// The current pool of diagnostics into which delayed
/// diagnostics should go.
sema::DelayedDiagnosticPool *CurPool;
public:
DelayedDiagnostics() : CurPool(nullptr) {}
/// Adds a delayed diagnostic.
void add(const sema::DelayedDiagnostic &diag); // in DelayedDiagnostic.h
/// Determines whether diagnostics should be delayed.
bool shouldDelayDiagnostics() { return CurPool != nullptr; }
/// Returns the current delayed-diagnostics pool.
sema::DelayedDiagnosticPool *getCurrentPool() const {
return CurPool;
}
/// Enter a new scope. Access and deprecation diagnostics will be
/// collected in this pool.
DelayedDiagnosticsState push(sema::DelayedDiagnosticPool &pool) {
DelayedDiagnosticsState state;
state.SavedPool = CurPool;
CurPool = &pool;
return state;
}
/// Leave a delayed-diagnostic state that was previously pushed.
/// Do not emit any of the diagnostics. This is performed as part
/// of the bookkeeping of popping a pool "properly".
void popWithoutEmitting(DelayedDiagnosticsState state) {
CurPool = state.SavedPool;
}
/// Enter a new scope where access and deprecation diagnostics are
/// not delayed.
DelayedDiagnosticsState pushUndelayed() {
DelayedDiagnosticsState state;
state.SavedPool = CurPool;
CurPool = nullptr;
return state;
}
/// Undo a previous pushUndelayed().
void popUndelayed(DelayedDiagnosticsState state) {
assert(CurPool == nullptr);
CurPool = state.SavedPool;
}
} DelayedDiagnostics;
/// A RAII object to temporarily push a declaration context.
class ContextRAII {
private:
Sema &S;
DeclContext *SavedContext;
ProcessingContextState SavedContextState;
QualType SavedCXXThisTypeOverride;
unsigned SavedFunctionScopesStart;
unsigned SavedInventedParameterInfosStart;
public:
ContextRAII(Sema &S, DeclContext *ContextToPush, bool NewThisContext = true)
: S(S), SavedContext(S.CurContext),
SavedContextState(S.DelayedDiagnostics.pushUndelayed()),
SavedCXXThisTypeOverride(S.CXXThisTypeOverride),
SavedFunctionScopesStart(S.FunctionScopesStart),
SavedInventedParameterInfosStart(S.InventedParameterInfosStart)
{
assert(ContextToPush && "pushing null context");
S.CurContext = ContextToPush;
if (NewThisContext)
S.CXXThisTypeOverride = QualType();
// Any saved FunctionScopes do not refer to this context.
S.FunctionScopesStart = S.FunctionScopes.size();
S.InventedParameterInfosStart = S.InventedParameterInfos.size();
}
void pop() {
if (!SavedContext) return;
S.CurContext = SavedContext;
S.DelayedDiagnostics.popUndelayed(SavedContextState);
S.CXXThisTypeOverride = SavedCXXThisTypeOverride;
S.FunctionScopesStart = SavedFunctionScopesStart;
S.InventedParameterInfosStart = SavedInventedParameterInfosStart;
SavedContext = nullptr;
}
~ContextRAII() {
pop();
}
};
/// Whether the AST is currently being rebuilt to correct immediate
/// invocations. Immediate invocation candidates and references to consteval
/// functions aren't tracked when this is set.
bool RebuildingImmediateInvocation = false;
/// Used to change context to isConstantEvaluated without pushing a heavy
/// ExpressionEvaluationContextRecord object.
bool isConstantEvaluatedOverride;
bool isConstantEvaluated() {
return ExprEvalContexts.back().isConstantEvaluated() ||
isConstantEvaluatedOverride;
}
/// RAII object to handle the state changes required to synthesize
/// a function body.
class SynthesizedFunctionScope {
Sema &S;
Sema::ContextRAII SavedContext;
bool PushedCodeSynthesisContext = false;
public:
SynthesizedFunctionScope(Sema &S, DeclContext *DC)
: S(S), SavedContext(S, DC) {
S.PushFunctionScope();
S.PushExpressionEvaluationContext(
Sema::ExpressionEvaluationContext::PotentiallyEvaluated);
if (auto *FD = dyn_cast<FunctionDecl>(DC))
FD->setWillHaveBody(true);
else
assert(isa<ObjCMethodDecl>(DC));
}
void addContextNote(SourceLocation UseLoc) {
assert(!PushedCodeSynthesisContext);
Sema::CodeSynthesisContext Ctx;
Ctx.Kind = Sema::CodeSynthesisContext::DefiningSynthesizedFunction;
Ctx.PointOfInstantiation = UseLoc;
Ctx.Entity = cast<Decl>(S.CurContext);
S.pushCodeSynthesisContext(Ctx);
PushedCodeSynthesisContext = true;
}
~SynthesizedFunctionScope() {
if (PushedCodeSynthesisContext)
S.popCodeSynthesisContext();
if (auto *FD = dyn_cast<FunctionDecl>(S.CurContext))
FD->setWillHaveBody(false);
S.PopExpressionEvaluationContext();
S.PopFunctionScopeInfo();
}
};
/// WeakUndeclaredIdentifiers - Identifiers contained in
/// \#pragma weak before being declared. Rare; they may alias another
/// identifier, declared or undeclared.
llvm::MapVector<IdentifierInfo *, WeakInfo> WeakUndeclaredIdentifiers;
/// ExtnameUndeclaredIdentifiers - Identifiers contained in
/// \#pragma redefine_extname before declared. Used in Solaris system headers
/// to define functions that occur in multiple standards to call the version
/// in the currently selected standard.
llvm::DenseMap<IdentifierInfo*,AsmLabelAttr*> ExtnameUndeclaredIdentifiers;
/// Load weak undeclared identifiers from the external source.
void LoadExternalWeakUndeclaredIdentifiers();
/// WeakTopLevelDecl - Translation-unit scoped declarations generated by
/// \#pragma weak during processing of other Decls.
/// I couldn't figure out a clean way to generate these in-line, so
/// we store them here and handle separately -- which is a hack.
/// It would be best to refactor this.
SmallVector<Decl*,2> WeakTopLevelDecl;
IdentifierResolver IdResolver;
/// Translation Unit Scope - useful to Objective-C actions that need
/// to lookup file scope declarations in the "ordinary" C decl namespace.
/// For example, user-defined classes, built-in "id" type, etc.
Scope *TUScope;
/// The C++ "std" namespace, where the standard library resides.
LazyDeclPtr StdNamespace;
/// The C++ "std::bad_alloc" class, which is defined by the C++
/// standard library.
LazyDeclPtr StdBadAlloc;
/// The C++ "std::align_val_t" enum class, which is defined by the C++
/// standard library.
LazyDeclPtr StdAlignValT;
/// The C++ "std::experimental" namespace, where the experimental parts
/// of the standard library resides.
NamespaceDecl *StdExperimentalNamespaceCache;
/// The C++ "std::initializer_list" template, which is defined in
/// \<initializer_list>.
ClassTemplateDecl *StdInitializerList;
/// The C++ "std::coroutine_traits" template, which is defined in
/// \<coroutine_traits>
ClassTemplateDecl *StdCoroutineTraitsCache;
/// The namespace where coroutine components are defined. The standard
/// defines them in the std namespace, while the previous implementation
/// placed them in std::experimental.
NamespaceDecl *CoroTraitsNamespaceCache;
/// The C++ "type_info" declaration, which is defined in \<typeinfo>.
RecordDecl *CXXTypeInfoDecl;
/// The MSVC "_GUID" struct, which is defined in MSVC header files.
RecordDecl *MSVCGuidDecl;
/// Caches identifiers/selectors for NSFoundation APIs.
std::unique_ptr<NSAPI> NSAPIObj;
/// The declaration of the Objective-C NSNumber class.
ObjCInterfaceDecl *NSNumberDecl;
/// The declaration of the Objective-C NSValue class.
ObjCInterfaceDecl *NSValueDecl;
/// Pointer to NSNumber type (NSNumber *).
QualType NSNumberPointer;
/// Pointer to NSValue type (NSValue *).
QualType NSValuePointer;
/// The Objective-C NSNumber methods used to create NSNumber literals.
ObjCMethodDecl *NSNumberLiteralMethods[NSAPI::NumNSNumberLiteralMethods];
/// The declaration of the Objective-C NSString class.
ObjCInterfaceDecl *NSStringDecl;
/// Pointer to NSString type (NSString *).
QualType NSStringPointer;
/// The declaration of the stringWithUTF8String: method.
ObjCMethodDecl *StringWithUTF8StringMethod;
/// The declaration of the valueWithBytes:objCType: method.
ObjCMethodDecl *ValueWithBytesObjCTypeMethod;
/// The declaration of the Objective-C NSArray class.
ObjCInterfaceDecl *NSArrayDecl;
/// The declaration of the arrayWithObjects:count: method.
ObjCMethodDecl *ArrayWithObjectsMethod;
/// The declaration of the Objective-C NSDictionary class.
ObjCInterfaceDecl *NSDictionaryDecl;
/// The declaration of the dictionaryWithObjects:forKeys:count: method.
ObjCMethodDecl *DictionaryWithObjectsMethod;
/// id<NSCopying> type.
QualType QIDNSCopying;
/// Will hold the 'respondsToSelector:' selector.
Selector RespondsToSelectorSel;
/// A flag to remember whether the implicit forms of operator new and delete
/// have been declared.
bool GlobalNewDeleteDeclared;
/// Describes how the expressions currently being parsed are
/// evaluated at run-time, if at all.
enum class ExpressionEvaluationContext {
/// The current expression and its subexpressions occur within an
/// unevaluated operand (C++11 [expr]p7), such as the subexpression of
/// \c sizeof, where the type of the expression may be significant but
/// no code will be generated to evaluate the value of the expression at
/// run time.
Unevaluated,
/// The current expression occurs within a braced-init-list within
/// an unevaluated operand. This is mostly like a regular unevaluated
/// context, except that we still instantiate constexpr functions that are
/// referenced here so that we can perform narrowing checks correctly.
UnevaluatedList,
/// The current expression occurs within a discarded statement.
/// This behaves largely similarly to an unevaluated operand in preventing
/// definitions from being required, but not in other ways.
DiscardedStatement,
/// The current expression occurs within an unevaluated
/// operand that unconditionally permits abstract references to
/// fields, such as a SIZE operator in MS-style inline assembly.
UnevaluatedAbstract,
/// The current context is "potentially evaluated" in C++11 terms,
/// but the expression is evaluated at compile-time (like the values of
/// cases in a switch statement).
ConstantEvaluated,
/// In addition to being constant evaluated, the current expression
/// occurs in an immediate function context - either a consteval function
/// or a consteval if statement.
ImmediateFunctionContext,
/// The current expression is potentially evaluated at run time,
/// which means that code may be generated to evaluate the value of the
/// expression at run time.
PotentiallyEvaluated,
/// The current expression is potentially evaluated, but any
/// declarations referenced inside that expression are only used if
/// in fact the current expression is used.
///
/// This value is used when parsing default function arguments, for which
/// we would like to provide diagnostics (e.g., passing non-POD arguments
/// through varargs) but do not want to mark declarations as "referenced"
/// until the default argument is used.
PotentiallyEvaluatedIfUsed
};
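// For illustration, each context roughly corresponds to a syntactic
// position (a sketch, not an exhaustive mapping):
//
//   sizeof(*p)                  // Unevaluated: no code generated for *p
//   if constexpr (false) f();   // DiscardedStatement: the discarded arm
//   case N:                     // ConstantEvaluated: N computed at compile time
//   x = f();                    // PotentiallyEvaluated: ordinary runtime code
//   void g(int n = f());        // PotentiallyEvaluatedIfUsed: default argument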
using ImmediateInvocationCandidate = llvm::PointerIntPair<ConstantExpr *, 1>;
/// Data structure used to record current or nested
/// expression evaluation contexts.
struct ExpressionEvaluationContextRecord {
/// The expression evaluation context.
ExpressionEvaluationContext Context;
/// Whether the enclosing context needed a cleanup.
CleanupInfo ParentCleanup;
/// The number of active cleanup objects when we entered
/// this expression evaluation context.
unsigned NumCleanupObjects;
/// The number of typos encountered during this expression evaluation
/// context (i.e. the number of TypoExprs created).
unsigned NumTypos;
MaybeODRUseExprSet SavedMaybeODRUseExprs;
/// The lambdas that are present within this context, if it
/// is indeed an unevaluated context.
SmallVector<LambdaExpr *, 2> Lambdas;
/// The declaration that provides context for lambda expressions
/// and block literals if the normal declaration context does not
/// suffice, e.g., in a default function argument.
Decl *ManglingContextDecl;
/// If we are processing a decltype type, a set of call expressions
/// for which we have deferred checking the completeness of the return type.
SmallVector<CallExpr *, 8> DelayedDecltypeCalls;
/// If we are processing a decltype type, a set of temporary binding
/// expressions for which we have deferred checking the destructor.
SmallVector<CXXBindTemporaryExpr *, 8> DelayedDecltypeBinds;
llvm::SmallPtrSet<const Expr *, 8> PossibleDerefs;
/// Expressions appearing as the LHS of a volatile assignment in this
/// context. We produce a warning for these when popping the context if
/// they are not discarded-value expressions nor unevaluated operands.
SmallVector<Expr*, 2> VolatileAssignmentLHSs;
/// Set of candidates for starting an immediate invocation.
llvm::SmallVector<ImmediateInvocationCandidate, 4> ImmediateInvocationCandidates;
/// Set of DeclRefExprs referencing a consteval function when used in a
/// context not already known to be immediately invoked.
llvm::SmallPtrSet<DeclRefExpr *, 4> ReferenceToConsteval;
/// \brief Describes whether we are in an expression context which we have
/// to handle differently.
enum ExpressionKind {
EK_Decltype, EK_TemplateArgument, EK_Other
} ExprContext;
// A context can be nested in both a discarded statement context and
// an immediate function context, so they need to be tracked independently.
bool InDiscardedStatement;
bool InImmediateFunctionContext;
ExpressionEvaluationContextRecord(ExpressionEvaluationContext Context,
unsigned NumCleanupObjects,
CleanupInfo ParentCleanup,
Decl *ManglingContextDecl,
ExpressionKind ExprContext)
: Context(Context), ParentCleanup(ParentCleanup),
NumCleanupObjects(NumCleanupObjects), NumTypos(0),
ManglingContextDecl(ManglingContextDecl), ExprContext(ExprContext),
InDiscardedStatement(false), InImmediateFunctionContext(false) {}
bool isUnevaluated() const {
return Context == ExpressionEvaluationContext::Unevaluated ||
Context == ExpressionEvaluationContext::UnevaluatedAbstract ||
Context == ExpressionEvaluationContext::UnevaluatedList;
}
bool isConstantEvaluated() const {
return Context == ExpressionEvaluationContext::ConstantEvaluated ||
Context == ExpressionEvaluationContext::ImmediateFunctionContext;
}
bool isImmediateFunctionContext() const {
return Context == ExpressionEvaluationContext::ImmediateFunctionContext ||
(Context == ExpressionEvaluationContext::DiscardedStatement &&
InImmediateFunctionContext);
}
bool isDiscardedStatementContext() const {
return Context == ExpressionEvaluationContext::DiscardedStatement ||
(Context ==
ExpressionEvaluationContext::ImmediateFunctionContext &&
InDiscardedStatement);
}
};
/// A stack of expression evaluation contexts.
SmallVector<ExpressionEvaluationContextRecord, 8> ExprEvalContexts;
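// The stack is normally manipulated through the paired entry points used
// elsewhere in Sema; an illustrative sketch:
//
//   S.PushExpressionEvaluationContext(
//       Sema::ExpressionEvaluationContext::Unevaluated);
//   // ... parse the operand of sizeof/decltype ...
//   S.PopExpressionEvaluationContext(); // pops the record pushed above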
/// Emit a warning for all pending noderef expressions that we recorded.
void WarnOnPendingNoDerefs(ExpressionEvaluationContextRecord &Rec);
/// Compute the mangling number context for a lambda expression or
/// block literal. Also return the extra mangling decl if any.
///
/// \param DC - The DeclContext containing the lambda expression or
/// block literal.
std::tuple<MangleNumberingContext *, Decl *>
getCurrentMangleNumberContext(const DeclContext *DC);
/// SpecialMemberOverloadResult - The overloading result for a special member
/// function.
///
/// This is basically a wrapper around PointerIntPair. The lowest bits of the
/// integer are used to determine whether overload resolution succeeded.
class SpecialMemberOverloadResult {
public:
enum Kind {
NoMemberOrDeleted,
Ambiguous,
Success
};
private:
llvm::PointerIntPair<CXXMethodDecl*, 2> Pair;
public:
SpecialMemberOverloadResult() : Pair() {}
SpecialMemberOverloadResult(CXXMethodDecl *MD)
: Pair(MD, MD->isDeleted() ? NoMemberOrDeleted : Success) {}
CXXMethodDecl *getMethod() const { return Pair.getPointer(); }
void setMethod(CXXMethodDecl *MD) { Pair.setPointer(MD); }
Kind getKind() const { return static_cast<Kind>(Pair.getInt()); }
void setKind(Kind K) { Pair.setInt(K); }
};
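// Reading a cached result looks roughly like this sketch (`SMOR` is a
// placeholder for a value obtained from the cache below):
//
//   if (SMOR.getKind() == SpecialMemberOverloadResult::Success) {
//     CXXMethodDecl *MD = SMOR.getMethod(); // the resolved special member
//     // ... use MD ...
//   }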
class SpecialMemberOverloadResultEntry
: public llvm::FastFoldingSetNode,
public SpecialMemberOverloadResult {
public:
SpecialMemberOverloadResultEntry(const llvm::FoldingSetNodeID &ID)
: FastFoldingSetNode(ID)
{}
};
/// A cache of special member function overload resolution results
/// for C++ records.
llvm::FoldingSet<SpecialMemberOverloadResultEntry> SpecialMemberCache;
/// A cache of the flags available in enumerations with the flag_bits
/// attribute.
mutable llvm::DenseMap<const EnumDecl*, llvm::APInt> FlagBitsCache;
/// The kind of translation unit we are processing.
///
/// When we're processing a complete translation unit, Sema will perform
/// end-of-translation-unit semantic tasks (such as creating
/// initializers for tentative definitions in C) once parsing has
/// completed. Modules and precompiled headers perform different kinds of
/// checks.
const TranslationUnitKind TUKind;
llvm::BumpPtrAllocator BumpAlloc;
/// The number of SFINAE diagnostics that have been trapped.
unsigned NumSFINAEErrors;
typedef llvm::DenseMap<ParmVarDecl *, llvm::TinyPtrVector<ParmVarDecl *>>
UnparsedDefaultArgInstantiationsMap;
/// A mapping from parameters with unparsed default arguments to the
/// set of instantiations of each parameter.
///
/// This mapping is a temporary data structure used when parsing
/// nested class templates or nested classes of class templates,
/// where we might end up instantiating an inner class before the
/// default arguments of its methods have been parsed.
UnparsedDefaultArgInstantiationsMap UnparsedDefaultArgInstantiations;
// Contains the locations of the beginning of each unparsed default
// argument.
llvm::DenseMap<ParmVarDecl *, SourceLocation> UnparsedDefaultArgLocs;
/// UndefinedButUsed - all the used, undefined objects which require a
/// definition in this translation unit.
llvm::MapVector<NamedDecl *, SourceLocation> UndefinedButUsed;
/// Determine if VD, which must be a variable or function, is an external
/// symbol that nonetheless can't be referenced from outside this translation
/// unit because its type has no linkage and it's not extern "C".
bool isExternalWithNoLinkageType(ValueDecl *VD);
/// Obtain a sorted list of functions that are undefined but ODR-used.
void getUndefinedButUsed(
SmallVectorImpl<std::pair<NamedDecl *, SourceLocation> > &Undefined);
/// Retrieves the list of suspicious delete-expressions that will be checked
/// at the end of the translation unit.
const llvm::MapVector<FieldDecl *, DeleteLocs> &
getMismatchingDeleteExpressions() const;
class GlobalMethodPool {
public:
using Lists = std::pair<ObjCMethodList, ObjCMethodList>;
using iterator = llvm::DenseMap<Selector, Lists>::iterator;
iterator begin() { return Methods.begin(); }
iterator end() { return Methods.end(); }
iterator find(Selector Sel) { return Methods.find(Sel); }
std::pair<iterator, bool> insert(std::pair<Selector, Lists> &&Val) {
return Methods.insert(Val);
}
int count(Selector Sel) const { return Methods.count(Sel); }
bool empty() const { return Methods.empty(); }
private:
llvm::DenseMap<Selector, Lists> Methods;
};
/// Method Pool - allows efficient lookup when typechecking messages to "id".
/// We need to maintain a list, since selectors can have differing signatures
/// across classes. In Cocoa, this happens to be extremely uncommon (only 1%
/// of selectors are "overloaded").
/// At the head of the list it is recorded whether there were 0, 1, or >= 2
/// methods inside categories with a particular selector.
GlobalMethodPool MethodPool;
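// An illustrative lookup against the pool (a sketch; that the pair's first
// list holds instance methods and its second holds factory methods is an
// assumption here):
//
//   GlobalMethodPool::iterator It = MethodPool.find(Sel);
//   if (It != MethodPool.end()) {
//     ObjCMethodList &InstanceMethods = It->second.first;
//     ObjCMethodList &FactoryMethods = It->second.second;
//     // ... walk the lists for matching signatures ...
//   }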
/// Method selectors used in a \@selector expression. Used for implementation
/// of -Wselector.
llvm::MapVector<Selector, SourceLocation> ReferencedSelectors;
/// List of SourceLocations where 'self' is implicitly retained inside a
/// block.
llvm::SmallVector<std::pair<SourceLocation, const BlockDecl *>, 1>
ImplicitlyRetainedSelfLocs;
/// Kinds of C++ special members.
enum CXXSpecialMember {
CXXDefaultConstructor,
CXXCopyConstructor,
CXXMoveConstructor,
CXXCopyAssignment,
CXXMoveAssignment,
CXXDestructor,
CXXInvalid
};
typedef llvm::PointerIntPair<CXXRecordDecl *, 3, CXXSpecialMember>
SpecialMemberDecl;
/// The C++ special members which we are currently in the process of
/// declaring. If this process recursively triggers the declaration of the
/// same special member, we should act as if it is not yet declared.
llvm::SmallPtrSet<SpecialMemberDecl, 4> SpecialMembersBeingDeclared;
/// Kinds of defaulted comparison operator functions.
enum class DefaultedComparisonKind : unsigned char {
/// This is not a defaultable comparison operator.
None,
/// This is an operator== that should be implemented as a series of
/// subobject comparisons.
Equal,
/// This is an operator<=> that should be implemented as a series of
/// subobject comparisons.
ThreeWay,
/// This is an operator!= that should be implemented as a rewrite in terms
/// of a == comparison.
NotEqual,
/// This is an <, <=, >, or >= that should be implemented as a rewrite in
/// terms of a <=> comparison.
Relational,
};
/// The function definitions which were renamed as part of typo-correction
/// to match their respective declarations. We want to keep track of them
/// to ensure that we don't emit a "redefinition" error if we encounter a
/// correctly named definition after the renamed definition.
llvm::SmallPtrSet<const NamedDecl *, 4> TypoCorrectedFunctionDefinitions;
/// Stack of types that correspond to the parameter entities that are
/// currently being copy-initialized. Can be empty.
llvm::SmallVector<QualType, 4> CurrentParameterCopyTypes;
void ReadMethodPool(Selector Sel);
void updateOutOfDateSelector(Selector Sel);
/// Private Helper predicate to check for 'self'.
bool isSelfExpr(Expr *RExpr);
bool isSelfExpr(Expr *RExpr, const ObjCMethodDecl *Method);
/// Cause the active diagnostic on the DiagnosticsEngine to be
/// emitted. This is closely coupled to the SemaDiagnosticBuilder class and
/// should not be used elsewhere.
void EmitCurrentDiagnostic(unsigned DiagID);
/// Records and restores the CurFPFeatures state on entry/exit of compound
/// statements.
class FPFeaturesStateRAII {
public:
FPFeaturesStateRAII(Sema &S) : S(S), OldFPFeaturesState(S.CurFPFeatures) {
OldOverrides = S.FpPragmaStack.CurrentValue;
}
~FPFeaturesStateRAII() {
S.CurFPFeatures = OldFPFeaturesState;
S.FpPragmaStack.CurrentValue = OldOverrides;
}
FPOptionsOverride getOverrides() { return OldOverrides; }
private:
Sema& S;
FPOptions OldFPFeaturesState;
FPOptionsOverride OldOverrides;
};
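// Illustrative use at a compound-statement boundary (`SemaRef` is a
// placeholder):
//
//   {
//     FPFeaturesStateRAII SavedFPState(SemaRef);
//     // ... #pragma float_control etc. may alter CurFPFeatures here ...
//   } // CurFPFeatures and FpPragmaStack.CurrentValue are restored.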
void addImplicitTypedef(StringRef Name, QualType T);
bool WarnedStackExhausted = false;
/// Increment when we find a reference; decrement when we find an ignored
/// assignment. Ultimately the value is 0 if every reference is an ignored
/// assignment.
llvm::DenseMap<const VarDecl *, int> RefsMinusAssignments;
Optional<std::unique_ptr<DarwinSDKInfo>> CachedDarwinSDKInfo;
public:
Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer,
TranslationUnitKind TUKind = TU_Complete,
CodeCompleteConsumer *CompletionConsumer = nullptr);
~Sema();
/// Perform initialization that occurs after the parser has been
/// initialized but before it parses anything.
void Initialize();
/// This virtual key function only exists to limit the emission of debug info
/// describing the Sema class. GCC and Clang only emit debug info for a class
/// with a vtable when the vtable is emitted. Sema is final and not
/// polymorphic, but the debug info size savings are so significant that it is
/// worth adding a vtable just to take advantage of this optimization.
virtual void anchor();
const LangOptions &getLangOpts() const { return LangOpts; }
OpenCLOptions &getOpenCLOptions() { return OpenCLFeatures; }
FPOptions &getCurFPFeatures() { return CurFPFeatures; }
DiagnosticsEngine &getDiagnostics() const { return Diags; }
SourceManager &getSourceManager() const { return SourceMgr; }
Preprocessor &getPreprocessor() const { return PP; }
ASTContext &getASTContext() const { return Context; }
ASTConsumer &getASTConsumer() const { return Consumer; }
ASTMutationListener *getASTMutationListener() const;
ExternalSemaSource* getExternalSource() const { return ExternalSource; }
DarwinSDKInfo *getDarwinSDKInfoForAvailabilityChecking(SourceLocation Loc,
StringRef Platform);
/// Registers an external source. If an external source already exists,
/// creates a multiplex external source and appends to it.
///
///\param[in] E - A non-null external sema source.
///
void addExternalSource(ExternalSemaSource *E);
void PrintStats() const;
/// Warn that the stack is nearly exhausted.
void warnStackExhausted(SourceLocation Loc);
/// Run some code with "sufficient" stack space. (Currently, at least 256K is
/// guaranteed). Produces a warning if we're low on stack space and allocates
/// more in that case. Use this in code that may recurse deeply (for example,
/// in template instantiation) to avoid stack overflow.
void runWithSufficientStackSpace(SourceLocation Loc,
llvm::function_ref<void()> Fn);
/// Helper class that creates diagnostics with optional
/// template instantiation stacks.
///
/// This class provides a wrapper around the basic DiagnosticBuilder
/// class that emits diagnostics. ImmediateDiagBuilder is
/// responsible for emitting the diagnostic (as DiagnosticBuilder
/// does) and, if the diagnostic comes from inside a template
/// instantiation, printing the template instantiation stack as
/// well.
class ImmediateDiagBuilder : public DiagnosticBuilder {
Sema &SemaRef;
unsigned DiagID;
public:
ImmediateDiagBuilder(DiagnosticBuilder &DB, Sema &SemaRef, unsigned DiagID)
: DiagnosticBuilder(DB), SemaRef(SemaRef), DiagID(DiagID) {}
ImmediateDiagBuilder(DiagnosticBuilder &&DB, Sema &SemaRef, unsigned DiagID)
: DiagnosticBuilder(DB), SemaRef(SemaRef), DiagID(DiagID) {}
// This is a cunning lie. DiagnosticBuilder actually performs move
// construction in its copy constructor (but due to varied uses, it's not
// possible to conveniently express this as actual move construction). So
// the default copy ctor here is fine, because the base class disables the
// source anyway, so the user-defined ~ImmediateDiagBuilder is a safe no-op
// in that case anyway.
ImmediateDiagBuilder(const ImmediateDiagBuilder &) = default;
~ImmediateDiagBuilder() {
// If we aren't active, there is nothing to do.
if (!isActive()) return;
// Otherwise, we need to emit the diagnostic. First clear the diagnostic
// builder itself so it won't emit the diagnostic in its own destructor.
//
// This seems wasteful, in that as written the DiagnosticBuilder dtor will
// do its own needless checks to see if the diagnostic needs to be
// emitted. However, because we take care to ensure that the builder
// objects never escape, a sufficiently smart compiler will be able to
// eliminate that code.
Clear();
// Dispatch to Sema to emit the diagnostic.
SemaRef.EmitCurrentDiagnostic(DiagID);
}
/// Teach operator<< to produce an object of the correct type.
template <typename T>
friend const ImmediateDiagBuilder &
operator<<(const ImmediateDiagBuilder &Diag, const T &Value) {
const DiagnosticBuilder &BaseDiag = Diag;
BaseDiag << Value;
return Diag;
}
// It is necessary to limit this to rvalue reference to avoid calling this
// function with a bitfield lvalue argument since non-const reference to
// bitfield is not allowed.
template <typename T, typename = typename std::enable_if<
!std::is_lvalue_reference<T>::value>::type>
const ImmediateDiagBuilder &operator<<(T &&V) const {
const DiagnosticBuilder &BaseDiag = *this;
BaseDiag << std::move(V);
return *this;
}
};
/// Bitmask to contain the list of reasons a single diagnostic should be
/// emitted, based on its language. This permits multiple offload systems
/// to coexist in the same translation unit.
enum class DeviceDiagnosticReason {
/// Diagnostic doesn't apply to anything. Included for completeness, but
/// using it should make emission a no-op.
None = 0,
/// OpenMP specific diagnostic.
OmpDevice = 1 << 0,
OmpHost = 1 << 1,
OmpAll = OmpDevice | OmpHost,
/// CUDA specific diagnostics.
CudaDevice = 1 << 2,
CudaHost = 1 << 3,
CudaAll = CudaDevice | CudaHost,
/// SYCL specific diagnostic.
Sycl = 1 << 4,
/// ESIMD specific diagnostic.
Esimd = 1 << 5,
/// A flag representing 'all'. This can be used to avoid the check
/// altogether and make this behave as it did before the
/// DiagnosticReason was added (that is, unconditionally emit).
/// Note: This needs to be updated if any flags above are added.
All = OmpAll | CudaAll | Sycl | Esimd,
LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue=*/All)
};
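// Because of LLVM_MARK_AS_BITMASK_ENUM, reasons compose with the usual
// bitwise operators; an illustrative sketch:
//
//   DeviceDiagnosticReason R =
//       DeviceDiagnosticReason::OmpDevice | DeviceDiagnosticReason::Sycl;
//   if ((R & DeviceDiagnosticReason::Sycl) != DeviceDiagnosticReason::None)
//     ; // the diagnostic applies to SYCL device code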
private:
// A collection of a pair of undefined functions and their callers known
// to be reachable from a routine on the device (kernel or device function).
typedef std::pair<const FunctionDecl *, const FunctionDecl *> CallPair;
llvm::SmallVector<CallPair> UndefinedReachableFromSyclDevice;
public:
// Helper routine to add a Callee-Caller pair of FunctionDecl *
// to UndefinedReachableFromSyclDevice.
void addFDToReachableFromSyclDevice(const FunctionDecl *Callee,
const FunctionDecl *Caller) {
UndefinedReachableFromSyclDevice.push_back(std::make_pair(Callee, Caller));
}
// Helper routine to check if a pair of Callee-Caller FunctionDecl *
// is in UndefinedReachableFromSyclDevice.
bool isFDReachableFromSyclDevice(const FunctionDecl *Callee,
const FunctionDecl *Caller) {
return llvm::any_of(UndefinedReachableFromSyclDevice,
[Callee, Caller](const CallPair &P) {
return P.first == Callee && P.second == Caller;
});
}
/// A generic diagnostic builder for errors which may or may not be deferred.
///
/// In CUDA, there exist constructs (e.g. variable-length arrays, try/catch)
/// which are not allowed to appear inside __device__ functions and are
/// allowed to appear in __host__ __device__ functions only if the host+device
/// function is never codegen'ed.
///
/// To handle this, we use the notion of "deferred diagnostics", where we
/// attach a diagnostic to a FunctionDecl that's emitted iff it's codegen'ed.
///
/// This class lets you emit either a regular diagnostic, a deferred
/// diagnostic, or no diagnostic at all, according to an argument you pass to
/// its constructor, thus simplifying the process of creating these "maybe
/// deferred" diagnostics.
class SemaDiagnosticBuilder {
public:
enum Kind {
/// Emit no diagnostics.
K_Nop,
/// Emit the diagnostic immediately (i.e., behave like Sema::Diag()).
K_Immediate,
/// Emit the diagnostic immediately, and, if it's a warning or error, also
/// emit a call stack showing how this function can be reached by an a
/// priori known-emitted function.
K_ImmediateWithCallStack,
/// Create a deferred diagnostic, which is emitted only if the function
/// it's attached to is codegen'ed. Also emit a call stack as with
/// K_ImmediateWithCallStack.
K_Deferred
};
SemaDiagnosticBuilder(Kind K, SourceLocation Loc, unsigned DiagID,
FunctionDecl *Fn, Sema &S, DeviceDiagnosticReason R);
SemaDiagnosticBuilder(SemaDiagnosticBuilder &&D);
SemaDiagnosticBuilder(const SemaDiagnosticBuilder &) = default;
~SemaDiagnosticBuilder();
bool isImmediate() const { return ImmediateDiag.hasValue(); }
/// Convertible to bool: True if we immediately emitted an error, false if
/// we didn't emit an error or we created a deferred error.
///
/// Example usage:
///
/// if (SemaDiagnosticBuilder(...) << foo << bar)
/// return ExprError();
///
/// But see CUDADiagIfDeviceCode() and CUDADiagIfHostCode() -- you probably
/// want to use these instead of creating a SemaDiagnosticBuilder yourself.
operator bool() const { return isImmediate(); }
template <typename T>
friend const SemaDiagnosticBuilder &
operator<<(const SemaDiagnosticBuilder &Diag, const T &Value) {
if (Diag.ImmediateDiag.hasValue())
*Diag.ImmediateDiag << Value;
else if (Diag.PartialDiagId.hasValue())
Diag.S.DeviceDeferredDiags[Diag.Fn][*Diag.PartialDiagId]
.getDiag()
.second
<< Value;
return Diag;
}
// It is necessary to limit this to rvalue reference to avoid calling this
// function with a bitfield lvalue argument since non-const reference to
// bitfield is not allowed.
template <typename T, typename = typename std::enable_if<
!std::is_lvalue_reference<T>::value>::type>
const SemaDiagnosticBuilder &operator<<(T &&V) const {
if (ImmediateDiag.hasValue())
*ImmediateDiag << std::move(V);
else if (PartialDiagId.hasValue())
S.DeviceDeferredDiags[Fn][*PartialDiagId].getDiag().second
<< std::move(V);
return *this;
}
friend const SemaDiagnosticBuilder &
operator<<(const SemaDiagnosticBuilder &Diag, const PartialDiagnostic &PD) {
if (Diag.ImmediateDiag.hasValue())
PD.Emit(*Diag.ImmediateDiag);
else if (Diag.PartialDiagId.hasValue())
Diag.S.DeviceDeferredDiags[Diag.Fn][*Diag.PartialDiagId]
.getDiag()
.second = PD;
return Diag;
}
void AddFixItHint(const FixItHint &Hint) const {
if (ImmediateDiag.hasValue())
ImmediateDiag->AddFixItHint(Hint);
else if (PartialDiagId.hasValue())
S.DeviceDeferredDiags[Fn][*PartialDiagId].getDiag().second.AddFixItHint(
Hint);
}
friend ExprResult ExprError(const SemaDiagnosticBuilder &) {
return ExprError();
}
friend StmtResult StmtError(const SemaDiagnosticBuilder &) {
return StmtError();
}
operator ExprResult() const { return ExprError(); }
operator StmtResult() const { return StmtError(); }
operator TypeResult() const { return TypeError(); }
operator DeclResult() const { return DeclResult(true); }
operator MemInitResult() const { return MemInitResult(true); }
private:
Sema &S;
SourceLocation Loc;
unsigned DiagID;
FunctionDecl *Fn;
bool ShowCallStack;
// Invariant: At most one of these Optionals has a value.
// FIXME: Switch these to a Variant once that exists.
llvm::Optional<ImmediateDiagBuilder> ImmediateDiag;
llvm::Optional<unsigned> PartialDiagId;
};
/// Whether the last error-level diagnostic was immediate. This is used to
/// determine whether the next info diagnostic should be immediate.
bool IsLastErrorImmediate = true;
/// Emit a diagnostic.
SemaDiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID,
bool DeferHint = false);
/// Emit a partial diagnostic.
SemaDiagnosticBuilder Diag(SourceLocation Loc, const PartialDiagnostic &PD,
bool DeferHint = false);
/// Build a partial diagnostic.
PartialDiagnostic PDiag(unsigned DiagID = 0); // in SemaInternal.h
/// Whether deferrable diagnostics should be deferred.
bool DeferDiags = false;
/// RAII class to control scope of DeferDiags.
class DeferDiagsRAII {
Sema &S;
bool SavedDeferDiags = false;
public:
DeferDiagsRAII(Sema &S, bool DeferDiags)
: S(S), SavedDeferDiags(S.DeferDiags) {
S.DeferDiags = DeferDiags;
}
~DeferDiagsRAII() { S.DeferDiags = SavedDeferDiags; }
};
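// Illustrative use (`SemaRef` is a placeholder):
//
//   {
//     DeferDiagsRAII DD(SemaRef, /*DeferDiags=*/true);
//     // ... deferrable diagnostics emitted here are deferred ...
//   } // The previous DeferDiags value is restored.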
/// Whether an uncompilable error has occurred. This includes errors that
/// happen in deferred diagnostics.
bool hasUncompilableErrorOccurred() const;
bool findMacroSpelling(SourceLocation &loc, StringRef name);
/// Get a string to suggest for zero-initialization of a type.
std::string
getFixItZeroInitializerForType(QualType T, SourceLocation Loc) const;
std::string getFixItZeroLiteralForType(QualType T, SourceLocation Loc) const;
/// Calls \c Lexer::getLocForEndOfToken()
SourceLocation getLocForEndOfToken(SourceLocation Loc, unsigned Offset = 0);
/// Retrieve the module loader associated with the preprocessor.
ModuleLoader &getModuleLoader() const;
/// Invent a new identifier for parameters of abbreviated templates.
IdentifierInfo *
InventAbbreviatedTemplateParameterTypeName(IdentifierInfo *ParamName,
unsigned Index);
void emitAndClearUnusedLocalTypedefWarnings();
private:
/// Function or variable declarations to be checked for whether the deferred
/// diagnostics should be emitted.
llvm::SmallSetVector<Decl *, 4> DeclsToCheckForDeferredDiags;
public:
// Emit all deferred diagnostics.
void emitDeferredDiags();
enum TUFragmentKind {
/// The global module fragment, between 'module;' and a module-declaration.
Global,
/// A normal translation unit fragment. For a non-module unit, this is the
/// entire translation unit. Otherwise, it runs from the module-declaration
/// to the private-module-fragment (if any) or the end of the TU (if not).
Normal,
/// The private module fragment, between 'module :private;' and the end of
/// the translation unit.
Private
};
void ActOnStartOfTranslationUnit();
void ActOnEndOfTranslationUnit();
void ActOnEndOfTranslationUnitFragment(TUFragmentKind Kind);
void CheckDelegatingCtorCycles();
Scope *getScopeForContext(DeclContext *Ctx);
void PushFunctionScope();
void PushBlockScope(Scope *BlockScope, BlockDecl *Block);
sema::LambdaScopeInfo *PushLambdaScope();
/// This is used to inform Sema what the current TemplateParameterDepth
/// is during Parsing. Currently it is used to pass on the depth
/// when parsing generic lambda 'auto' parameters.
void RecordParsingTemplateParameterDepth(unsigned Depth);
void PushCapturedRegionScope(Scope *RegionScope, CapturedDecl *CD,
RecordDecl *RD, CapturedRegionKind K,
unsigned OpenMPCaptureLevel = 0);
/// Custom deleter to allow FunctionScopeInfos to be kept alive for a short
/// time after they've been popped.
class PoppedFunctionScopeDeleter {
Sema *Self;
public:
explicit PoppedFunctionScopeDeleter(Sema *Self) : Self(Self) {}
void operator()(sema::FunctionScopeInfo *Scope) const;
};
using PoppedFunctionScopePtr =
std::unique_ptr<sema::FunctionScopeInfo, PoppedFunctionScopeDeleter>;
PoppedFunctionScopePtr
PopFunctionScopeInfo(const sema::AnalysisBasedWarnings::Policy *WP = nullptr,
const Decl *D = nullptr,
QualType BlockType = QualType());
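// A sketch of the intended lifetime handling (`WP` and `D` are placeholder
// names): the returned unique_ptr keeps the popped scope alive until it goes
// out of scope, at which point PoppedFunctionScopeDeleter reclaims it:
//
//   PoppedFunctionScopePtr Scope = PopFunctionScopeInfo(&WP, D);
//   // Scope->... remains valid here, even though the scope was popped.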
sema::FunctionScopeInfo *getCurFunction() const {
return FunctionScopes.empty() ? nullptr : FunctionScopes.back();
}
sema::FunctionScopeInfo *getEnclosingFunction() const;
void setFunctionHasBranchIntoScope();
void setFunctionHasBranchProtectedScope();
void setFunctionHasIndirectGoto();
void setFunctionHasMustTail();
void PushCompoundScope(bool IsStmtExpr);
void PopCompoundScope();
sema::CompoundScopeInfo &getCurCompoundScope() const;
bool hasAnyUnrecoverableErrorsInThisFunction() const;
/// Retrieve the current block, if any.
sema::BlockScopeInfo *getCurBlock();
/// Get the innermost lambda enclosing the current location, if any. This
/// looks through intervening non-lambda scopes such as local functions and
/// blocks.
sema::LambdaScopeInfo *getEnclosingLambda() const;
/// Retrieve the current lambda scope info, if any.
/// \param IgnoreNonLambdaCapturingScope true if should find the top-most
/// lambda scope info ignoring all inner capturing scopes that are not
/// lambda scopes.
sema::LambdaScopeInfo *
getCurLambda(bool IgnoreNonLambdaCapturingScope = false);
/// Retrieve the current generic lambda info, if any.
sema::LambdaScopeInfo *getCurGenericLambda();
/// Retrieve the current captured region, if any.
sema::CapturedRegionScopeInfo *getCurCapturedRegion();
/// Retrieve the current function, if any, that should be analyzed for
/// potential availability violations.
sema::FunctionScopeInfo *getCurFunctionAvailabilityContext();
/// WeakTopLevelDeclDecls - access to \#pragma weak-generated Decls
SmallVectorImpl<Decl *> &WeakTopLevelDecls() { return WeakTopLevelDecl; }
/// Called before parsing a function declarator belonging to a function
/// declaration.
void ActOnStartFunctionDeclarationDeclarator(Declarator &D,
unsigned TemplateParameterDepth);
/// Called after parsing a function declarator belonging to a function
/// declaration.
void ActOnFinishFunctionDeclarationDeclarator(Declarator &D);
void ActOnComment(SourceRange Comment);
//===--------------------------------------------------------------------===//
// Type Analysis / Processing: SemaType.cpp.
//
QualType BuildQualifiedType(QualType T, SourceLocation Loc, Qualifiers Qs,
const DeclSpec *DS = nullptr);
QualType BuildQualifiedType(QualType T, SourceLocation Loc, unsigned CVRA,
const DeclSpec *DS = nullptr);
QualType BuildPointerType(QualType T,
SourceLocation Loc, DeclarationName Entity);
QualType BuildReferenceType(QualType T, bool LValueRef,
SourceLocation Loc, DeclarationName Entity);
QualType BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM,
Expr *ArraySize, unsigned Quals,
SourceRange Brackets, DeclarationName Entity);
QualType BuildVectorType(QualType T, Expr *VecSize, SourceLocation AttrLoc);
QualType BuildExtVectorType(QualType T, Expr *ArraySize,
SourceLocation AttrLoc);
QualType BuildMatrixType(QualType T, Expr *NumRows, Expr *NumColumns,
SourceLocation AttrLoc);
QualType BuildAddressSpaceAttr(QualType &T, LangAS ASIdx, Expr *AddrSpace,
SourceLocation AttrLoc);
/// Same as above, but constructs the AddressSpace index if not provided.
QualType BuildAddressSpaceAttr(QualType &T, Expr *AddrSpace,
SourceLocation AttrLoc);
SYCLIntelFPGAIVDepAttr *
BuildSYCLIntelFPGAIVDepAttr(const AttributeCommonInfo &CI, Expr *Expr1,
Expr *Expr2);
LoopUnrollHintAttr *BuildLoopUnrollHintAttr(const AttributeCommonInfo &A,
Expr *E);
OpenCLUnrollHintAttr *
BuildOpenCLLoopUnrollHintAttr(const AttributeCommonInfo &A, Expr *E);
SYCLIntelFPGALoopCountAttr *
BuildSYCLIntelFPGALoopCountAttr(const AttributeCommonInfo &CI, Expr *E);
SYCLIntelFPGAInitiationIntervalAttr *
BuildSYCLIntelFPGAInitiationIntervalAttr(const AttributeCommonInfo &CI,
Expr *E);
SYCLIntelFPGAMaxConcurrencyAttr *
BuildSYCLIntelFPGAMaxConcurrencyAttr(const AttributeCommonInfo &CI, Expr *E);
SYCLIntelFPGAMaxInterleavingAttr *
BuildSYCLIntelFPGAMaxInterleavingAttr(const AttributeCommonInfo &CI, Expr *E);
SYCLIntelFPGASpeculatedIterationsAttr *
BuildSYCLIntelFPGASpeculatedIterationsAttr(const AttributeCommonInfo &CI,
Expr *E);
SYCLIntelFPGALoopCoalesceAttr *
BuildSYCLIntelFPGALoopCoalesceAttr(const AttributeCommonInfo &CI, Expr *E);
bool CheckQualifiedFunctionForTypeId(QualType T, SourceLocation Loc);
bool CheckFunctionReturnType(QualType T, SourceLocation Loc);
/// Build a function type.
///
/// This routine checks the function type according to C++ rules and
/// under the assumption that the result type and parameter types have
/// just been instantiated from a template. It therefore duplicates
/// some of the behavior of GetTypeForDeclarator, but in a much
/// simpler form that is only suitable for this narrow use case.
///
/// \param T The return type of the function.
///
/// \param ParamTypes The parameter types of the function. This array
/// will be modified to account for adjustments to the types of the
/// function parameters.
///
/// \param Loc The location of the entity whose type involves this
/// function type or, if there is no such entity, the location of the
/// type that will have function type.
///
/// \param Entity The name of the entity that involves the function
/// type, if known.
///
/// \param EPI Extra information about the function type. Usually this will
/// be taken from an existing function with the same prototype.
///
/// \returns A suitable function type, if there are no errors. The
/// unqualified type will always be a FunctionProtoType.
/// Otherwise, returns a NULL type.
QualType BuildFunctionType(QualType T,
MutableArrayRef<QualType> ParamTypes,
SourceLocation Loc, DeclarationName Entity,
const FunctionProtoType::ExtProtoInfo &EPI);
QualType BuildMemberPointerType(QualType T, QualType Class,
SourceLocation Loc,
DeclarationName Entity);
QualType BuildBlockPointerType(QualType T,
SourceLocation Loc, DeclarationName Entity);
QualType BuildParenType(QualType T);
QualType BuildAtomicType(QualType T, SourceLocation Loc);
QualType BuildReadPipeType(QualType T,
SourceLocation Loc);
QualType BuildWritePipeType(QualType T,
SourceLocation Loc);
QualType BuildBitIntType(bool IsUnsigned, Expr *BitWidth, SourceLocation Loc);
TypeSourceInfo *GetTypeForDeclarator(Declarator &D, Scope *S);
TypeSourceInfo *GetTypeForDeclaratorCast(Declarator &D, QualType FromTy);
/// Package the given type and TSI into a ParsedType.
ParsedType CreateParsedType(QualType T, TypeSourceInfo *TInfo);
DeclarationNameInfo GetNameForDeclarator(Declarator &D);
DeclarationNameInfo GetNameFromUnqualifiedId(const UnqualifiedId &Name);
static QualType GetTypeFromParser(ParsedType Ty,
TypeSourceInfo **TInfo = nullptr);
CanThrowResult canThrow(const Stmt *E);
/// Determine whether the callee of a particular function call can throw.
/// E, D and Loc are all optional.
static CanThrowResult canCalleeThrow(Sema &S, const Expr *E, const Decl *D,
SourceLocation Loc = SourceLocation());
const FunctionProtoType *ResolveExceptionSpec(SourceLocation Loc,
const FunctionProtoType *FPT);
void UpdateExceptionSpec(FunctionDecl *FD,
const FunctionProtoType::ExceptionSpecInfo &ESI);
bool CheckSpecifiedExceptionType(QualType &T, SourceRange Range);
bool CheckDistantExceptionSpec(QualType T);
bool CheckEquivalentExceptionSpec(FunctionDecl *Old, FunctionDecl *New);
bool CheckEquivalentExceptionSpec(
const FunctionProtoType *Old, SourceLocation OldLoc,
const FunctionProtoType *New, SourceLocation NewLoc);
bool CheckEquivalentExceptionSpec(
const PartialDiagnostic &DiagID, const PartialDiagnostic & NoteID,
const FunctionProtoType *Old, SourceLocation OldLoc,
const FunctionProtoType *New, SourceLocation NewLoc);
bool handlerCanCatch(QualType HandlerType, QualType ExceptionType);
bool CheckExceptionSpecSubset(const PartialDiagnostic &DiagID,
const PartialDiagnostic &NestedDiagID,
const PartialDiagnostic &NoteID,
const PartialDiagnostic &NoThrowDiagID,
const FunctionProtoType *Superset,
SourceLocation SuperLoc,
const FunctionProtoType *Subset,
SourceLocation SubLoc);
bool CheckParamExceptionSpec(const PartialDiagnostic &NestedDiagID,
const PartialDiagnostic &NoteID,
const FunctionProtoType *Target,
SourceLocation TargetLoc,
const FunctionProtoType *Source,
SourceLocation SourceLoc);
TypeResult ActOnTypeName(Scope *S, Declarator &D);
/// The parser has parsed the context-sensitive type 'instancetype'
/// in an Objective-C message declaration. Return the appropriate type.
ParsedType ActOnObjCInstanceType(SourceLocation Loc);
/// Abstract class used to diagnose incomplete types.
struct TypeDiagnoser {
TypeDiagnoser() {}
virtual void diagnose(Sema &S, SourceLocation Loc, QualType T) = 0;
virtual ~TypeDiagnoser() {}
};
static int getPrintable(int I) { return I; }
static unsigned getPrintable(unsigned I) { return I; }
static bool getPrintable(bool B) { return B; }
static const char * getPrintable(const char *S) { return S; }
static StringRef getPrintable(StringRef S) { return S; }
static const std::string &getPrintable(const std::string &S) { return S; }
static const IdentifierInfo *getPrintable(const IdentifierInfo *II) {
return II;
}
static DeclarationName getPrintable(DeclarationName N) { return N; }
static QualType getPrintable(QualType T) { return T; }
static SourceRange getPrintable(SourceRange R) { return R; }
static SourceRange getPrintable(SourceLocation L) { return L; }
static SourceRange getPrintable(const Expr *E) { return E->getSourceRange(); }
static SourceRange getPrintable(TypeLoc TL) { return TL.getSourceRange();}
template <typename... Ts> class BoundTypeDiagnoser : public TypeDiagnoser {
protected:
unsigned DiagID;
std::tuple<const Ts &...> Args;
template <std::size_t... Is>
void emit(const SemaDiagnosticBuilder &DB,
std::index_sequence<Is...>) const {
// Apply all tuple elements to the builder in order.
bool Dummy[] = {false, (DB << getPrintable(std::get<Is>(Args)))...};
(void)Dummy;
}
public:
BoundTypeDiagnoser(unsigned DiagID, const Ts &...Args)
: TypeDiagnoser(), DiagID(DiagID), Args(Args...) {
assert(DiagID != 0 && "no diagnostic for type diagnoser");
}
void diagnose(Sema &S, SourceLocation Loc, QualType T) override {
const SemaDiagnosticBuilder &DB = S.Diag(Loc, DiagID);
emit(DB, std::index_sequence_for<Ts...>());
DB << T;
}
};
/// Do a check to make sure \p Name looks like a legal argument for the
/// swift_name attribute applied to decl \p D. Raise a diagnostic if the name
/// is invalid for the given declaration.
///
/// \p AL is used to provide caret diagnostics in case of a malformed name.
///
/// \returns true if the name is a valid swift name for \p D, false otherwise.
bool DiagnoseSwiftName(Decl *D, StringRef Name, SourceLocation Loc,
const ParsedAttr &AL, bool IsAsync);
/// A derivative of BoundTypeDiagnoser for which the diagnostic's type
/// parameter is preceded by a 0/1 enum that is 1 if the type is sizeless.
/// For example, a diagnostic with no other parameters would generally have
/// the form "...%select{incomplete|sizeless}0 type %1...".
template <typename... Ts>
class SizelessTypeDiagnoser : public BoundTypeDiagnoser<Ts...> {
public:
SizelessTypeDiagnoser(unsigned DiagID, const Ts &... Args)
: BoundTypeDiagnoser<Ts...>(DiagID, Args...) {}
void diagnose(Sema &S, SourceLocation Loc, QualType T) override {
const SemaDiagnosticBuilder &DB = S.Diag(Loc, this->DiagID);
this->emit(DB, std::index_sequence_for<Ts...>());
DB << T->isSizelessType() << T;
}
};
enum class CompleteTypeKind {
/// Apply the normal rules for complete types. In particular,
/// treat all sizeless types as incomplete.
Normal,
/// Relax the normal rules for complete types so that they include
/// sizeless built-in types.
AcceptSizeless,
// FIXME: Eventually we should flip the default to Normal and opt in
// to AcceptSizeless rather than opt out of it.
Default = AcceptSizeless
};
private:
/// Methods for marking which expressions involve dereferencing a pointer
/// marked with the 'noderef' attribute. Expressions are checked bottom up as
/// they are parsed, meaning that a noderef pointer may not be accessed. For
/// example, in `&*p` where `p` is a noderef pointer, we will first parse the
/// `*p`, but need to check that `address of` is called on it. This requires
/// keeping a container of all pending expressions and checking whether the
/// address of each is eventually taken.
void CheckSubscriptAccessOfNoDeref(const ArraySubscriptExpr *E);
void CheckAddressOfNoDeref(const Expr *E);
void CheckMemberAccessOfNoDeref(const MemberExpr *E);
bool RequireCompleteTypeImpl(SourceLocation Loc, QualType T,
CompleteTypeKind Kind, TypeDiagnoser *Diagnoser);
struct ModuleScope {
SourceLocation BeginLoc;
clang::Module *Module = nullptr;
bool ModuleInterface = false;
bool ImplicitGlobalModuleFragment = false;
VisibleModuleSet OuterVisibleModules;
};
/// The modules we're currently parsing.
llvm::SmallVector<ModuleScope, 16> ModuleScopes;
/// Namespace definitions that we will export when they finish.
llvm::SmallPtrSet<const NamespaceDecl*, 8> DeferredExportedNamespaces;
/// Get the module whose scope we are currently within.
Module *getCurrentModule() const {
return ModuleScopes.empty() ? nullptr : ModuleScopes.back().Module;
}
/// Helper function to determine whether we are within a module purview.
/// Returns false if we are not in a module.
bool isCurrentModulePurview() const {
return getCurrentModule() ? getCurrentModule()->isModulePurview() : false;
}
/// Enter the scope of the global module.
Module *PushGlobalModuleFragment(SourceLocation BeginLoc, bool IsImplicit);
/// Leave the scope of the global module.
void PopGlobalModuleFragment();
VisibleModuleSet VisibleModules;
public:
/// Get the module owning an entity.
Module *getOwningModule(const Decl *Entity) {
return Entity->getOwningModule();
}
/// Make a merged definition of an existing hidden definition \p ND
/// visible at the specified location.
void makeMergedDefinitionVisible(NamedDecl *ND);
bool isModuleVisible(const Module *M, bool ModulePrivate = false);
// When loading a non-modular PCH file, this is used to restore module
// visibility.
void makeModuleVisible(Module *Mod, SourceLocation ImportLoc) {
VisibleModules.setVisible(Mod, ImportLoc);
}
/// Determine whether a declaration is visible to name lookup.
bool isVisible(const NamedDecl *D) {
return D->isUnconditionallyVisible() || isVisibleSlow(D);
}
/// Determine whether any declaration of an entity is visible.
bool
hasVisibleDeclaration(const NamedDecl *D,
llvm::SmallVectorImpl<Module *> *Modules = nullptr) {
return isVisible(D) || hasVisibleDeclarationSlow(D, Modules);
}
bool hasVisibleDeclarationSlow(const NamedDecl *D,
llvm::SmallVectorImpl<Module *> *Modules);
bool hasVisibleMergedDefinition(NamedDecl *Def);
bool hasMergedDefinitionInCurrentModule(NamedDecl *Def);
/// Determine if \p D and \p Suggested have a structurally compatible
/// layout as described in C11 6.2.7/1.
bool hasStructuralCompatLayout(Decl *D, Decl *Suggested);
/// Determine if \p D has a visible definition. If not, suggest a declaration
/// that should be made visible to expose the definition.
bool hasVisibleDefinition(NamedDecl *D, NamedDecl **Suggested,
bool OnlyNeedComplete = false);
bool hasVisibleDefinition(const NamedDecl *D) {
NamedDecl *Hidden;
return hasVisibleDefinition(const_cast<NamedDecl*>(D), &Hidden);
}
/// Determine if the template parameter \p D has a visible default argument.
bool
hasVisibleDefaultArgument(const NamedDecl *D,
llvm::SmallVectorImpl<Module *> *Modules = nullptr);
/// Determine if there is a visible declaration of \p D that is an explicit
/// specialization declaration for a specialization of a template. (For a
/// member specialization, use hasVisibleMemberSpecialization.)
bool hasVisibleExplicitSpecialization(
const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr);
/// Determine if there is a visible declaration of \p D that is a member
/// specialization declaration (as opposed to an instantiated declaration).
bool hasVisibleMemberSpecialization(
const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr);
/// Determine if \p A and \p B are equivalent internal linkage declarations
/// from different modules, and thus an ambiguity error can be downgraded to
/// an extension warning.
bool isEquivalentInternalLinkageDeclaration(const NamedDecl *A,
const NamedDecl *B);
void diagnoseEquivalentInternalLinkageDeclarations(
SourceLocation Loc, const NamedDecl *D,
ArrayRef<const NamedDecl *> Equiv);
bool isUsualDeallocationFunction(const CXXMethodDecl *FD);
bool isCompleteType(SourceLocation Loc, QualType T,
CompleteTypeKind Kind = CompleteTypeKind::Default) {
return !RequireCompleteTypeImpl(Loc, T, Kind, nullptr);
}
bool RequireCompleteType(SourceLocation Loc, QualType T,
CompleteTypeKind Kind, TypeDiagnoser &Diagnoser);
bool RequireCompleteType(SourceLocation Loc, QualType T,
CompleteTypeKind Kind, unsigned DiagID);
bool RequireCompleteType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser) {
return RequireCompleteType(Loc, T, CompleteTypeKind::Default, Diagnoser);
}
bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID) {
return RequireCompleteType(Loc, T, CompleteTypeKind::Default, DiagID);
}
template <typename... Ts>
bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID,
const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireCompleteType(Loc, T, Diagnoser);
}
template <typename... Ts>
bool RequireCompleteSizedType(SourceLocation Loc, QualType T, unsigned DiagID,
const Ts &... Args) {
SizelessTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireCompleteType(Loc, T, CompleteTypeKind::Normal, Diagnoser);
}
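// For illustration, extra arguments are bound into the diagnostic through
// BoundTypeDiagnoser; a hypothetical call site (the diagnostic ID is
// illustrative):
//
//   if (RequireCompleteType(Loc, T, diag::err_incomplete_type))
//     return true; // T was incomplete and a diagnostic was emitted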
/// Get the type of expression E, triggering instantiation to complete the
/// type if necessary -- that is, if the expression refers to a templated
/// static data member of incomplete array type.
///
/// May still return an incomplete type if instantiation was not possible or
/// if the type is incomplete for a different reason. Use
/// RequireCompleteExprType instead if a diagnostic is expected for an
/// incomplete expression type.
QualType getCompletedType(Expr *E);
void completeExprArrayBound(Expr *E);
bool RequireCompleteExprType(Expr *E, CompleteTypeKind Kind,
TypeDiagnoser &Diagnoser);
bool RequireCompleteExprType(Expr *E, unsigned DiagID);
template <typename... Ts>
bool RequireCompleteExprType(Expr *E, unsigned DiagID, const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireCompleteExprType(E, CompleteTypeKind::Default, Diagnoser);
}
template <typename... Ts>
bool RequireCompleteSizedExprType(Expr *E, unsigned DiagID,
const Ts &... Args) {
SizelessTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireCompleteExprType(E, CompleteTypeKind::Normal, Diagnoser);
}
bool RequireLiteralType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser);
bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID);
template <typename... Ts>
bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID,
const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireLiteralType(Loc, T, Diagnoser);
}
QualType getElaboratedType(ElaboratedTypeKeyword Keyword,
const CXXScopeSpec &SS, QualType T,
TagDecl *OwnedTagDecl = nullptr);
// Returns the underlying type of a decltype with the given expression.
QualType getDecltypeForExpr(Expr *E);
QualType BuildTypeofExprType(Expr *E);
/// If AsUnevaluated is false, E is treated as though it appeared in an
/// evaluated context, such as when building a type for decltype(auto).
QualType BuildDecltypeType(Expr *E, bool AsUnevaluated = true);
QualType BuildUnaryTransformType(QualType BaseType,
UnaryTransformType::UTTKind UKind,
SourceLocation Loc);
//===--------------------------------------------------------------------===//
// Symbol table / Decl tracking callbacks: SemaDecl.cpp.
//
struct SkipBodyInfo {
SkipBodyInfo()
: ShouldSkip(false), CheckSameAsPrevious(false), Previous(nullptr),
New(nullptr) {}
bool ShouldSkip;
bool CheckSameAsPrevious;
NamedDecl *Previous;
NamedDecl *New;
};
DeclGroupPtrTy ConvertDeclToDeclGroup(Decl *Ptr, Decl *OwnedType = nullptr);
void DiagnoseUseOfUnimplementedSelectors();
bool isSimpleTypeSpecifier(tok::TokenKind Kind) const;
ParsedType getTypeName(const IdentifierInfo &II, SourceLocation NameLoc,
Scope *S, CXXScopeSpec *SS = nullptr,
bool isClassName = false, bool HasTrailingDot = false,
ParsedType ObjectType = nullptr,
bool IsCtorOrDtorName = false,
bool WantNontrivialTypeSourceInfo = false,
bool IsClassTemplateDeductionContext = true,
IdentifierInfo **CorrectedII = nullptr);
TypeSpecifierType isTagName(IdentifierInfo &II, Scope *S);
bool isMicrosoftMissingTypename(const CXXScopeSpec *SS, Scope *S);
void DiagnoseUnknownTypeName(IdentifierInfo *&II,
SourceLocation IILoc,
Scope *S,
CXXScopeSpec *SS,
ParsedType &SuggestedType,
bool IsTemplateName = false);
/// Attempt to behave like MSVC in situations where lookup of an unqualified
/// type name has failed in a dependent context. In these situations, we
/// automatically form a DependentTypeName that will retry lookup in a related
/// scope during instantiation.
ParsedType ActOnMSVCUnknownTypeName(const IdentifierInfo &II,
SourceLocation NameLoc,
bool IsTemplateTypeArg);
/// Describes the result of the name lookup and resolution performed
/// by \c ClassifyName().
enum NameClassificationKind {
/// This name is not a type or template in this context, but might be
/// something else.
NC_Unknown,
/// Classification failed; an error has been produced.
NC_Error,
/// The name has been typo-corrected to a keyword.
NC_Keyword,
/// The name was classified as a type.
NC_Type,
/// The name was classified as a specific non-type, non-template
/// declaration. ActOnNameClassifiedAsNonType should be called to
/// convert the declaration to an expression.
NC_NonType,
/// The name was classified as an ADL-only function name.
/// ActOnNameClassifiedAsUndeclaredNonType should be called to convert the
/// result to an expression.
NC_UndeclaredNonType,
/// The name denotes a member of a dependent type that could not be
/// resolved. ActOnNameClassifiedAsDependentNonType should be called to
/// convert the result to an expression.
NC_DependentNonType,
/// The name was classified as an overload set, and an expression
/// representing that overload set has been formed.
/// ActOnNameClassifiedAsOverloadSet should be called to form a suitable
/// expression referencing the overload set.
NC_OverloadSet,
/// The name was classified as a template whose specializations are types.
NC_TypeTemplate,
/// The name was classified as a variable template name.
NC_VarTemplate,
/// The name was classified as a function template name.
NC_FunctionTemplate,
/// The name was classified as an ADL-only function template name.
NC_UndeclaredTemplate,
/// The name was classified as a concept name.
NC_Concept,
};
class NameClassification {
NameClassificationKind Kind;
union {
ExprResult Expr;
NamedDecl *NonTypeDecl;
TemplateName Template;
ParsedType Type;
};
explicit NameClassification(NameClassificationKind Kind) : Kind(Kind) {}
public:
NameClassification(ParsedType Type) : Kind(NC_Type), Type(Type) {}
NameClassification(const IdentifierInfo *Keyword) : Kind(NC_Keyword) {}
static NameClassification Error() {
return NameClassification(NC_Error);
}
static NameClassification Unknown() {
return NameClassification(NC_Unknown);
}
static NameClassification OverloadSet(ExprResult E) {
NameClassification Result(NC_OverloadSet);
Result.Expr = E;
return Result;
}
static NameClassification NonType(NamedDecl *D) {
NameClassification Result(NC_NonType);
Result.NonTypeDecl = D;
return Result;
}
static NameClassification UndeclaredNonType() {
return NameClassification(NC_UndeclaredNonType);
}
static NameClassification DependentNonType() {
return NameClassification(NC_DependentNonType);
}
static NameClassification TypeTemplate(TemplateName Name) {
NameClassification Result(NC_TypeTemplate);
Result.Template = Name;
return Result;
}
static NameClassification VarTemplate(TemplateName Name) {
NameClassification Result(NC_VarTemplate);
Result.Template = Name;
return Result;
}
static NameClassification FunctionTemplate(TemplateName Name) {
NameClassification Result(NC_FunctionTemplate);
Result.Template = Name;
return Result;
}
static NameClassification Concept(TemplateName Name) {
NameClassification Result(NC_Concept);
Result.Template = Name;
return Result;
}
static NameClassification UndeclaredTemplate(TemplateName Name) {
NameClassification Result(NC_UndeclaredTemplate);
Result.Template = Name;
return Result;
}
NameClassificationKind getKind() const { return Kind; }
ExprResult getExpression() const {
assert(Kind == NC_OverloadSet);
return Expr;
}
ParsedType getType() const {
assert(Kind == NC_Type);
return Type;
}
NamedDecl *getNonTypeDecl() const {
assert(Kind == NC_NonType);
return NonTypeDecl;
}
TemplateName getTemplateName() const {
assert(Kind == NC_TypeTemplate || Kind == NC_FunctionTemplate ||
Kind == NC_VarTemplate || Kind == NC_Concept ||
Kind == NC_UndeclaredTemplate);
return Template;
}
TemplateNameKind getTemplateNameKind() const {
switch (Kind) {
case NC_TypeTemplate:
return TNK_Type_template;
case NC_FunctionTemplate:
return TNK_Function_template;
case NC_VarTemplate:
return TNK_Var_template;
case NC_Concept:
return TNK_Concept_template;
case NC_UndeclaredTemplate:
return TNK_Undeclared_template;
default:
llvm_unreachable("unsupported name classification.");
}
}
};
/// Perform name lookup on the given name, classifying it based on
/// the results of name lookup and the following token.
///
/// This routine is used by the parser to resolve identifiers and help direct
/// parsing. When the identifier cannot be found, this routine will attempt
/// to correct the typo and classify based on the resulting name.
///
/// \param S The scope in which we're performing name lookup.
///
/// \param SS The nested-name-specifier that precedes the name.
///
/// \param Name The identifier. If typo correction finds an alternative name,
/// this pointer parameter will be updated accordingly.
///
/// \param NameLoc The location of the identifier.
///
/// \param NextToken The token following the identifier. Used to help
/// disambiguate the name.
///
/// \param CCC The correction callback, if typo correction is desired.
NameClassification ClassifyName(Scope *S, CXXScopeSpec &SS,
IdentifierInfo *&Name, SourceLocation NameLoc,
const Token &NextToken,
CorrectionCandidateCallback *CCC = nullptr);
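// Illustrative sketch (not part of the Sema interface): how a parser-side
// caller might consume the classification. 'Actions', 'E', and the
// surrounding names are hypothetical.
//
//   ExprResult E;
//   Sema::NameClassification C =
//       Actions.ClassifyName(getCurScope(), SS, Name, NameLoc, NextTok);
//   switch (C.getKind()) {
//   case Sema::NC_Type:
//     // Annotate the token with C.getType().
//     break;
//   case Sema::NC_NonType:
//     E = Actions.ActOnNameClassifiedAsNonType(getCurScope(), SS,
//                                              C.getNonTypeDecl(), NameLoc,
//                                              NextTok);
//     break;
//   case Sema::NC_OverloadSet:
//     E = Actions.ActOnNameClassifiedAsOverloadSet(getCurScope(),
//                                                  C.getExpression().get());
//     break;
//   default: // remaining kinds handled analogously
//     break;
//   }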
/// Act on the result of classifying a name as an undeclared (ADL-only)
/// non-type declaration.
ExprResult ActOnNameClassifiedAsUndeclaredNonType(IdentifierInfo *Name,
SourceLocation NameLoc);
/// Act on the result of classifying a name as an undeclared member of a
/// dependent base class.
ExprResult ActOnNameClassifiedAsDependentNonType(const CXXScopeSpec &SS,
IdentifierInfo *Name,
SourceLocation NameLoc,
bool IsAddressOfOperand);
/// Act on the result of classifying a name as a specific non-type
/// declaration.
ExprResult ActOnNameClassifiedAsNonType(Scope *S, const CXXScopeSpec &SS,
NamedDecl *Found,
SourceLocation NameLoc,
const Token &NextToken);
/// Act on the result of classifying a name as an overload set.
ExprResult ActOnNameClassifiedAsOverloadSet(Scope *S, Expr *OverloadSet);
/// Describes the detailed kind of a template name. Used in diagnostics.
enum class TemplateNameKindForDiagnostics {
ClassTemplate,
FunctionTemplate,
VarTemplate,
AliasTemplate,
TemplateTemplateParam,
Concept,
DependentTemplate
};
TemplateNameKindForDiagnostics
getTemplateNameKindForDiagnostics(TemplateName Name);
/// Determine whether it's plausible that E was intended to be a
/// template-name.
bool mightBeIntendedToBeTemplateName(ExprResult E, bool &Dependent) {
if (!getLangOpts().CPlusPlus || E.isInvalid())
return false;
Dependent = false;
if (auto *DRE = dyn_cast<DeclRefExpr>(E.get()))
return !DRE->hasExplicitTemplateArgs();
if (auto *ME = dyn_cast<MemberExpr>(E.get()))
return !ME->hasExplicitTemplateArgs();
Dependent = true;
if (auto *DSDRE = dyn_cast<DependentScopeDeclRefExpr>(E.get()))
return !DSDRE->hasExplicitTemplateArgs();
if (auto *DSME = dyn_cast<CXXDependentScopeMemberExpr>(E.get()))
return !DSME->hasExplicitTemplateArgs();
// Any additional cases recognized here should also be handled by
// diagnoseExprIntendedAsTemplateName.
return false;
}
void diagnoseExprIntendedAsTemplateName(Scope *S, ExprResult TemplateName,
SourceLocation Less,
SourceLocation Greater);
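// Illustrative sketch: typical pairing of the two routines above when the
// parser sees 'expr <' and suspects a missing template name ('Actions' and
// the location variables are hypothetical).
//
//   bool Dependent = false;
//   if (Actions.mightBeIntendedToBeTemplateName(PotentialTemplateName,
//                                               Dependent))
//     Actions.diagnoseExprIntendedAsTemplateName(
//         getCurScope(), PotentialTemplateName, LessLoc, GreaterLoc);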
void warnOnReservedIdentifier(const NamedDecl *D);
Decl *ActOnDeclarator(Scope *S, Declarator &D);
NamedDecl *HandleDeclarator(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParameterLists);
bool tryToFixVariablyModifiedVarType(TypeSourceInfo *&TInfo,
QualType &T, SourceLocation Loc,
unsigned FailedFoldDiagID);
void RegisterLocallyScopedExternCDecl(NamedDecl *ND, Scope *S);
bool DiagnoseClassNameShadow(DeclContext *DC, DeclarationNameInfo Info);
bool diagnoseQualifiedDeclaration(CXXScopeSpec &SS, DeclContext *DC,
DeclarationName Name, SourceLocation Loc,
bool IsTemplateId);
void
diagnoseIgnoredQualifiers(unsigned DiagID, unsigned Quals,
SourceLocation FallbackLoc,
SourceLocation ConstQualLoc = SourceLocation(),
SourceLocation VolatileQualLoc = SourceLocation(),
SourceLocation RestrictQualLoc = SourceLocation(),
SourceLocation AtomicQualLoc = SourceLocation(),
SourceLocation UnalignedQualLoc = SourceLocation());
static bool adjustContextForLocalExternDecl(DeclContext *&DC);
void DiagnoseFunctionSpecifiers(const DeclSpec &DS);
NamedDecl *getShadowedDeclaration(const TypedefNameDecl *D,
const LookupResult &R);
NamedDecl *getShadowedDeclaration(const VarDecl *D, const LookupResult &R);
NamedDecl *getShadowedDeclaration(const BindingDecl *D,
const LookupResult &R);
void CheckShadow(NamedDecl *D, NamedDecl *ShadowedDecl,
const LookupResult &R);
void CheckShadow(Scope *S, VarDecl *D);
/// Warn if 'E', which is an expression that is about to be modified, refers
/// to a shadowing declaration.
void CheckShadowingDeclModification(Expr *E, SourceLocation Loc);
void DiagnoseShadowingLambdaDecls(const sema::LambdaScopeInfo *LSI);
private:
/// Map of current shadowing declarations to shadowed declarations. Warn if
/// it looks like the user is trying to modify the shadowing declaration.
llvm::DenseMap<const NamedDecl *, const NamedDecl *> ShadowingDecls;
public:
void CheckCastAlign(Expr *Op, QualType T, SourceRange TRange);
void handleTagNumbering(const TagDecl *Tag, Scope *TagScope);
void setTagNameForLinkagePurposes(TagDecl *TagFromDeclSpec,
TypedefNameDecl *NewTD);
void CheckTypedefForVariablyModifiedType(Scope *S, TypedefNameDecl *D);
NamedDecl* ActOnTypedefDeclarator(Scope* S, Declarator& D, DeclContext* DC,
TypeSourceInfo *TInfo,
LookupResult &Previous);
NamedDecl* ActOnTypedefNameDecl(Scope* S, DeclContext* DC, TypedefNameDecl *D,
LookupResult &Previous, bool &Redeclaration);
NamedDecl *ActOnVariableDeclarator(Scope *S, Declarator &D, DeclContext *DC,
TypeSourceInfo *TInfo,
LookupResult &Previous,
MultiTemplateParamsArg TemplateParamLists,
bool &AddToScope,
ArrayRef<BindingDecl *> Bindings = None);
NamedDecl *
ActOnDecompositionDeclarator(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParamLists);
// Returns true if the variable declaration is a redeclaration
bool CheckVariableDeclaration(VarDecl *NewVD, LookupResult &Previous);
void CheckVariableDeclarationType(VarDecl *NewVD);
bool DeduceVariableDeclarationType(VarDecl *VDecl, bool DirectInit,
Expr *Init);
void CheckCompleteVariableDeclaration(VarDecl *VD);
void CheckCompleteDecompositionDeclaration(DecompositionDecl *DD);
void MaybeSuggestAddingStaticToDecl(const FunctionDecl *D);
NamedDecl* ActOnFunctionDeclarator(Scope* S, Declarator& D, DeclContext* DC,
TypeSourceInfo *TInfo,
LookupResult &Previous,
MultiTemplateParamsArg TemplateParamLists,
bool &AddToScope);
bool AddOverriddenMethods(CXXRecordDecl *DC, CXXMethodDecl *MD);
enum class CheckConstexprKind {
/// Diagnose issues that are non-constant or that are extensions.
Diagnose,
/// Identify whether this function satisfies the formal rules for constexpr
/// functions in the current language mode (with no extensions).
CheckValid
};
bool CheckConstexprFunctionDefinition(const FunctionDecl *FD,
CheckConstexprKind Kind);
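// Illustrative sketch: the same entry point serves both diagnosis and a
// silent validity query, selected by CheckConstexprKind.
//
//   // Diagnose problems in a user-written constexpr definition:
//   CheckConstexprFunctionDefinition(FD, CheckConstexprKind::Diagnose);
//   // Quietly test whether a function satisfies the formal constexpr rules:
//   bool Valid =
//       CheckConstexprFunctionDefinition(FD, CheckConstexprKind::CheckValid);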
void DiagnoseHiddenVirtualMethods(CXXMethodDecl *MD);
void FindHiddenVirtualMethods(CXXMethodDecl *MD,
SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods);
void NoteHiddenVirtualMethods(CXXMethodDecl *MD,
SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods);
// Returns true if the function declaration is a redeclaration
bool CheckFunctionDeclaration(Scope *S,
FunctionDecl *NewFD, LookupResult &Previous,
bool IsMemberSpecialization);
bool shouldLinkDependentDeclWithPrevious(Decl *D, Decl *OldDecl);
bool canFullyTypeCheckRedeclaration(ValueDecl *NewD, ValueDecl *OldD,
QualType NewT, QualType OldT);
void CheckMain(FunctionDecl *FD, const DeclSpec &D);
void CheckMSVCRTEntryPoint(FunctionDecl *FD);
Attr *getImplicitCodeSegOrSectionAttrForFunction(const FunctionDecl *FD,
bool IsDefinition);
void CheckFunctionOrTemplateParamDeclarator(Scope *S, Declarator &D);
Decl *ActOnParamDeclarator(Scope *S, Declarator &D);
ParmVarDecl *BuildParmVarDeclForTypedef(DeclContext *DC,
SourceLocation Loc,
QualType T);
ParmVarDecl *CheckParameter(DeclContext *DC, SourceLocation StartLoc,
SourceLocation NameLoc, IdentifierInfo *Name,
QualType T, TypeSourceInfo *TSInfo,
StorageClass SC);
void ActOnParamDefaultArgument(Decl *param,
SourceLocation EqualLoc,
Expr *defarg);
void ActOnParamUnparsedDefaultArgument(Decl *param, SourceLocation EqualLoc,
SourceLocation ArgLoc);
void ActOnParamDefaultArgumentError(Decl *param, SourceLocation EqualLoc);
ExprResult ConvertParamDefaultArgument(ParmVarDecl *Param, Expr *DefaultArg,
SourceLocation EqualLoc);
void SetParamDefaultArgument(ParmVarDecl *Param, Expr *DefaultArg,
SourceLocation EqualLoc);
// Contexts where using non-trivial C union types can be disallowed. This is
// passed to err_non_trivial_c_union_in_invalid_context.
enum NonTrivialCUnionContext {
// Function parameter.
NTCUC_FunctionParam,
// Function return.
NTCUC_FunctionReturn,
// Default-initialized object.
NTCUC_DefaultInitializedObject,
// Variable with automatic storage duration.
NTCUC_AutoVar,
// Initializer expression that might copy from another object.
NTCUC_CopyInit,
// Assignment.
NTCUC_Assignment,
// Compound literal.
NTCUC_CompoundLiteral,
// Block capture.
NTCUC_BlockCapture,
// lvalue-to-rvalue conversion of volatile type.
NTCUC_LValueToRValueVolatile,
};
/// Emit diagnostics if the initializer or any of its explicit or
/// implicitly-generated subexpressions require copying or
/// default-initializing a type that is or contains a C union type that is
/// non-trivial to copy or default-initialize.
void checkNonTrivialCUnionInInitializer(const Expr *Init, SourceLocation Loc);
// These flags are passed to checkNonTrivialCUnion.
enum NonTrivialCUnionKind {
NTCUK_Init = 0x1,
NTCUK_Destruct = 0x2,
NTCUK_Copy = 0x4,
};
/// Emit diagnostics if a non-trivial C union type or a struct that contains
/// a non-trivial C union is used in an invalid context.
void checkNonTrivialCUnion(QualType QT, SourceLocation Loc,
NonTrivialCUnionContext UseContext,
unsigned NonTrivialKind);
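// Illustrative sketch: the NTCUK_* values are bit flags and are meant to be
// OR'd together, e.g. for a use that both default-initializes and destroys
// the object ('ParamTy' and 'ParamLoc' are hypothetical):
//
//   checkNonTrivialCUnion(ParamTy, ParamLoc, NTCUC_FunctionParam,
//                         NTCUK_Init | NTCUK_Destruct);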
void AddInitializerToDecl(Decl *dcl, Expr *init, bool DirectInit);
void ActOnUninitializedDecl(Decl *dcl);
void ActOnInitializerError(Decl *Dcl);
void ActOnPureSpecifier(Decl *D, SourceLocation PureSpecLoc);
void ActOnCXXForRangeDecl(Decl *D);
StmtResult ActOnCXXForRangeIdentifier(Scope *S, SourceLocation IdentLoc,
IdentifierInfo *Ident,
ParsedAttributes &Attrs,
SourceLocation AttrEnd);
void SetDeclDeleted(Decl *dcl, SourceLocation DelLoc);
void SetDeclDefaulted(Decl *dcl, SourceLocation DefaultLoc);
void CheckStaticLocalForDllExport(VarDecl *VD);
void FinalizeDeclaration(Decl *D);
DeclGroupPtrTy FinalizeDeclaratorGroup(Scope *S, const DeclSpec &DS,
ArrayRef<Decl *> Group);
DeclGroupPtrTy BuildDeclaratorGroup(MutableArrayRef<Decl *> Group);
/// Should be called on all declarations that might have attached
/// documentation comments.
void ActOnDocumentableDecl(Decl *D);
void ActOnDocumentableDecls(ArrayRef<Decl *> Group);
void ActOnFinishKNRParamDeclarations(Scope *S, Declarator &D,
SourceLocation LocAfterDecls);
void CheckForFunctionRedefinition(
FunctionDecl *FD, const FunctionDecl *EffectiveDefinition = nullptr,
SkipBodyInfo *SkipBody = nullptr);
Decl *ActOnStartOfFunctionDef(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParamLists,
SkipBodyInfo *SkipBody = nullptr);
Decl *ActOnStartOfFunctionDef(Scope *S, Decl *D,
SkipBodyInfo *SkipBody = nullptr);
void ActOnStartTrailingRequiresClause(Scope *S, Declarator &D);
ExprResult ActOnFinishTrailingRequiresClause(ExprResult ConstraintExpr);
ExprResult ActOnRequiresClause(ExprResult ConstraintExpr);
void ActOnStartOfObjCMethodDef(Scope *S, Decl *D);
bool isObjCMethodDecl(Decl *D) {
return D && isa<ObjCMethodDecl>(D);
}
/// Determine whether we can delay parsing the body of a function or
/// function template until it is used, assuming we don't care about emitting
/// code for that function.
///
/// This will be \c false if we may need the body of the function in the
/// middle of parsing an expression (where it's impractical to switch to
/// parsing a different function), for instance, if it's constexpr in C++11
/// or has an 'auto' return type in C++14. These cases are essentially bugs.
bool canDelayFunctionBody(const Declarator &D);
/// Determine whether we can skip parsing the body of a function
/// definition, assuming we don't care about analyzing its body or emitting
/// code for that function.
///
/// This will be \c false only if we may need the body of the function in
/// order to parse the rest of the program (for instance, if it is
/// \c constexpr in C++11 or has an 'auto' return type in C++14).
bool canSkipFunctionBody(Decl *D);
void computeNRVO(Stmt *Body, sema::FunctionScopeInfo *Scope);
Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body);
Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body, bool IsInstantiation);
Decl *ActOnSkippedFunctionBody(Decl *Decl);
void ActOnFinishInlineFunctionDef(FunctionDecl *D);
/// ActOnFinishDelayedAttribute - Invoked when we have finished parsing an
/// attribute for which parsing is delayed.
void ActOnFinishDelayedAttribute(Scope *S, Decl *D, ParsedAttributes &Attrs);
/// Diagnose any unused parameters in the given sequence of
/// ParmVarDecl pointers.
void DiagnoseUnusedParameters(ArrayRef<ParmVarDecl *> Parameters);
/// Diagnose whether the size of parameters or return value of a
/// function or obj-c method definition is pass-by-value and larger than a
/// specified threshold.
void
DiagnoseSizeOfParametersAndReturnValue(ArrayRef<ParmVarDecl *> Parameters,
QualType ReturnTy, NamedDecl *D);
void DiagnoseInvalidJumps(Stmt *Body);
Decl *ActOnFileScopeAsmDecl(Expr *expr,
SourceLocation AsmLoc,
SourceLocation RParenLoc);
/// Handle a C++11 empty-declaration and attribute-declaration.
Decl *ActOnEmptyDeclaration(Scope *S, const ParsedAttributesView &AttrList,
SourceLocation SemiLoc);
enum class ModuleDeclKind {
Interface, ///< 'export module X;'
Implementation, ///< 'module X;'
};
/// The parser has processed a module-declaration that begins the definition
/// of a module interface or implementation.
DeclGroupPtrTy ActOnModuleDecl(SourceLocation StartLoc,
SourceLocation ModuleLoc, ModuleDeclKind MDK,
ModuleIdPath Path, bool IsFirstDecl);
/// The parser has processed a global-module-fragment declaration that begins
/// the definition of the global module fragment of the current module unit.
/// \param ModuleLoc The location of the 'module' keyword.
DeclGroupPtrTy ActOnGlobalModuleFragmentDecl(SourceLocation ModuleLoc);
/// The parser has processed a private-module-fragment declaration that begins
/// the definition of the private module fragment of the current module unit.
/// \param ModuleLoc The location of the 'module' keyword.
/// \param PrivateLoc The location of the 'private' keyword.
DeclGroupPtrTy ActOnPrivateModuleFragmentDecl(SourceLocation ModuleLoc,
SourceLocation PrivateLoc);
/// The parser has processed a module import declaration.
///
/// \param StartLoc The location of the first token in the declaration. This
/// could be the location of an '@', 'export', or 'import'.
/// \param ExportLoc The location of the 'export' keyword, if any.
/// \param ImportLoc The location of the 'import' keyword.
/// \param Path The module access path.
DeclResult ActOnModuleImport(SourceLocation StartLoc,
SourceLocation ExportLoc,
SourceLocation ImportLoc, ModuleIdPath Path);
DeclResult ActOnModuleImport(SourceLocation StartLoc,
SourceLocation ExportLoc,
SourceLocation ImportLoc, Module *M,
ModuleIdPath Path = {});
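// Illustrative example of how a source-level import maps onto the
// parameters above:
//
//   export import A.B;  // StartLoc and ExportLoc at 'export',
//                       // ImportLoc at 'import', Path names {A, B}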
/// The parser has processed a module import translated from a
/// #include or similar preprocessing directive.
void ActOnModuleInclude(SourceLocation DirectiveLoc, Module *Mod);
void BuildModuleInclude(SourceLocation DirectiveLoc, Module *Mod);
/// The parser has entered a submodule.
void ActOnModuleBegin(SourceLocation DirectiveLoc, Module *Mod);
/// The parser has left a submodule.
void ActOnModuleEnd(SourceLocation DirectiveLoc, Module *Mod);
/// Create an implicit import of the given module at the given
/// source location, for error recovery, if possible.
///
/// This routine is typically used when an entity found by name lookup
/// is actually hidden within a module that we know about but the user
/// has forgotten to import.
void createImplicitModuleImportForErrorRecovery(SourceLocation Loc,
Module *Mod);
/// Kinds of missing import. Note, the values of these enumerators correspond
/// to %select values in diagnostics.
enum class MissingImportKind {
Declaration,
Definition,
DefaultArgument,
ExplicitSpecialization,
PartialSpecialization
};
/// Diagnose that the specified declaration needs to be visible but
/// isn't, and suggest a module import that would resolve the problem.
void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl,
MissingImportKind MIK, bool Recover = true);
void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl,
SourceLocation DeclLoc, ArrayRef<Module *> Modules,
MissingImportKind MIK, bool Recover);
Decl *ActOnStartExportDecl(Scope *S, SourceLocation ExportLoc,
SourceLocation LBraceLoc);
Decl *ActOnFinishExportDecl(Scope *S, Decl *ExportDecl,
SourceLocation RBraceLoc);
/// We've found a use of a templated declaration that would trigger an
/// implicit instantiation. Check that any relevant explicit specializations
/// and partial specializations are visible, and diagnose if not.
void checkSpecializationVisibility(SourceLocation Loc, NamedDecl *Spec);
/// Retrieve a suitable printing policy for diagnostics.
PrintingPolicy getPrintingPolicy() const {
return getPrintingPolicy(Context, PP);
}
/// Retrieve a suitable printing policy for diagnostics.
static PrintingPolicy getPrintingPolicy(const ASTContext &Ctx,
const Preprocessor &PP);
/// Scope actions.
void ActOnPopScope(SourceLocation Loc, Scope *S);
void ActOnTranslationUnitScope(Scope *S);
Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS,
RecordDecl *&AnonRecord);
Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS,
MultiTemplateParamsArg TemplateParams,
bool IsExplicitInstantiation,
RecordDecl *&AnonRecord);
Decl *BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS,
AccessSpecifier AS,
RecordDecl *Record,
const PrintingPolicy &Policy);
Decl *BuildMicrosoftCAnonymousStruct(Scope *S, DeclSpec &DS,
RecordDecl *Record);
/// Common ways to introduce type names without a tag for use in diagnostics.
/// Keep in sync with err_tag_reference_non_tag.
enum NonTagKind {
NTK_NonStruct,
NTK_NonClass,
NTK_NonUnion,
NTK_NonEnum,
NTK_Typedef,
NTK_TypeAlias,
NTK_Template,
NTK_TypeAliasTemplate,
NTK_TemplateTemplateArgument,
};
/// Given a non-tag type declaration, returns an enum useful for indicating
/// what kind of non-tag type this is.
NonTagKind getNonTagTypeDeclKind(const Decl *D, TagTypeKind TTK);
bool isAcceptableTagRedeclaration(const TagDecl *Previous,
TagTypeKind NewTag, bool isDefinition,
SourceLocation NewTagLoc,
const IdentifierInfo *Name);
enum TagUseKind {
TUK_Reference, // Reference to a tag: 'struct foo *X;'
TUK_Declaration, // Fwd decl of a tag: 'struct foo;'
TUK_Definition, // Definition of a tag: 'struct foo { int X; } Y;'
TUK_Friend // Friend declaration: 'friend struct foo;'
};
Decl *ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name,
SourceLocation NameLoc, const ParsedAttributesView &Attr,
AccessSpecifier AS, SourceLocation ModulePrivateLoc,
MultiTemplateParamsArg TemplateParameterLists, bool &OwnedDecl,
bool &IsDependent, SourceLocation ScopedEnumKWLoc,
bool ScopedEnumUsesClassTag, TypeResult UnderlyingType,
bool IsTypeSpecifier, bool IsTemplateParamOrArg,
SkipBodyInfo *SkipBody = nullptr);
Decl *ActOnTemplatedFriendTag(Scope *S, SourceLocation FriendLoc,
unsigned TagSpec, SourceLocation TagLoc,
CXXScopeSpec &SS, IdentifierInfo *Name,
SourceLocation NameLoc,
const ParsedAttributesView &Attr,
MultiTemplateParamsArg TempParamLists);
TypeResult ActOnDependentTag(Scope *S,
unsigned TagSpec,
TagUseKind TUK,
const CXXScopeSpec &SS,
IdentifierInfo *Name,
SourceLocation TagLoc,
SourceLocation NameLoc);
void ActOnDefs(Scope *S, Decl *TagD, SourceLocation DeclStart,
IdentifierInfo *ClassName,
SmallVectorImpl<Decl *> &Decls);
Decl *ActOnField(Scope *S, Decl *TagD, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth);
FieldDecl *HandleField(Scope *S, RecordDecl *TagD, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth,
InClassInitStyle InitStyle,
AccessSpecifier AS);
MSPropertyDecl *HandleMSProperty(Scope *S, RecordDecl *TagD,
SourceLocation DeclStart, Declarator &D,
Expr *BitfieldWidth,
InClassInitStyle InitStyle,
AccessSpecifier AS,
const ParsedAttr &MSPropertyAttr);
FieldDecl *CheckFieldDecl(DeclarationName Name, QualType T,
TypeSourceInfo *TInfo,
RecordDecl *Record, SourceLocation Loc,
bool Mutable, Expr *BitfieldWidth,
InClassInitStyle InitStyle,
SourceLocation TSSL,
AccessSpecifier AS, NamedDecl *PrevDecl,
Declarator *D = nullptr);
bool CheckNontrivialField(FieldDecl *FD);
void DiagnoseNontrivial(const CXXRecordDecl *Record, CXXSpecialMember CSM);
enum TrivialABIHandling {
/// The triviality of a method unaffected by "trivial_abi".
TAH_IgnoreTrivialABI,
/// The triviality of a method affected by "trivial_abi".
TAH_ConsiderTrivialABI
};
bool SpecialMemberIsTrivial(CXXMethodDecl *MD, CXXSpecialMember CSM,
TrivialABIHandling TAH = TAH_IgnoreTrivialABI,
bool Diagnose = false);
/// For a defaulted function, the kind of defaulted function that it is.
class DefaultedFunctionKind {
CXXSpecialMember SpecialMember : 8;
DefaultedComparisonKind Comparison : 8;
public:
DefaultedFunctionKind()
: SpecialMember(CXXInvalid), Comparison(DefaultedComparisonKind::None) {
}
DefaultedFunctionKind(CXXSpecialMember CSM)
: SpecialMember(CSM), Comparison(DefaultedComparisonKind::None) {}
DefaultedFunctionKind(DefaultedComparisonKind Comp)
: SpecialMember(CXXInvalid), Comparison(Comp) {}
bool isSpecialMember() const { return SpecialMember != CXXInvalid; }
bool isComparison() const {
return Comparison != DefaultedComparisonKind::None;
}
explicit operator bool() const {
return isSpecialMember() || isComparison();
}
CXXSpecialMember asSpecialMember() const { return SpecialMember; }
DefaultedComparisonKind asComparison() const { return Comparison; }
/// Get the index of this function kind for use in diagnostics.
unsigned getDiagnosticIndex() const {
static_assert(CXXInvalid > CXXDestructor,
"invalid should have highest index");
static_assert((unsigned)DefaultedComparisonKind::None == 0,
"none should be equal to zero");
return SpecialMember + (unsigned)Comparison;
}
};
DefaultedFunctionKind getDefaultedFunctionKind(const FunctionDecl *FD);
CXXSpecialMember getSpecialMember(const CXXMethodDecl *MD) {
return getDefaultedFunctionKind(MD).asSpecialMember();
}
DefaultedComparisonKind getDefaultedComparisonKind(const FunctionDecl *FD) {
return getDefaultedFunctionKind(FD).asComparison();
}
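// Illustrative sketch: DefaultedFunctionKind is a tagged pair, so exactly
// one of the two accessors is meaningful, and the explicit operator bool
// tests whether the function can be defaulted at all ('handleSpecialMember'
// and 'handleComparison' are hypothetical helpers):
//
//   if (DefaultedFunctionKind DFK = getDefaultedFunctionKind(FD)) {
//     if (DFK.isSpecialMember())
//       handleSpecialMember(DFK.asSpecialMember());
//     else
//       handleComparison(DFK.asComparison());
//   }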
void ActOnLastBitfield(SourceLocation DeclStart,
SmallVectorImpl<Decl *> &AllIvarDecls);
Decl *ActOnIvar(Scope *S, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth,
tok::ObjCKeywordKind visibility);
// This is used for both record definitions and ObjC interface declarations.
void ActOnFields(Scope *S, SourceLocation RecLoc, Decl *TagDecl,
ArrayRef<Decl *> Fields, SourceLocation LBrac,
SourceLocation RBrac, const ParsedAttributesView &AttrList);
/// ActOnTagStartDefinition - Invoked when we have entered the
/// scope of a tag's definition (e.g., for an enumeration, class,
/// struct, or union).
void ActOnTagStartDefinition(Scope *S, Decl *TagDecl);
/// Perform an ODR-like check for C/ObjC when merging tag types from modules.
/// Unlike in C++, the body is actually parsed, and a structural mismatch is
/// rejected with an error.
bool ActOnDuplicateDefinition(DeclSpec &DS, Decl *Prev,
SkipBodyInfo &SkipBody);
typedef void *SkippedDefinitionContext;
/// Invoked when we enter a tag definition that we're skipping.
SkippedDefinitionContext ActOnTagStartSkippedDefinition(Scope *S, Decl *TD);
Decl *ActOnObjCContainerStartDefinition(Decl *IDecl);
/// ActOnStartCXXMemberDeclarations - Invoked when we have parsed a
/// C++ record definition's base-specifiers clause and are starting its
/// member declarations.
void ActOnStartCXXMemberDeclarations(Scope *S, Decl *TagDecl,
SourceLocation FinalLoc,
bool IsFinalSpelledSealed,
bool IsAbstract,
SourceLocation LBraceLoc);
/// ActOnTagFinishDefinition - Invoked once we have finished parsing
/// the definition of a tag (enumeration, class, struct, or union).
void ActOnTagFinishDefinition(Scope *S, Decl *TagDecl,
SourceRange BraceRange);
void ActOnTagFinishSkippedDefinition(SkippedDefinitionContext Context);
void ActOnObjCContainerFinishDefinition();
/// Invoked when we must temporarily exit the objective-c container
/// scope for parsing/looking-up C constructs.
///
/// Must be followed by a call to \see ActOnObjCReenterContainerContext
void ActOnObjCTemporaryExitContainerContext(DeclContext *DC);
void ActOnObjCReenterContainerContext(DeclContext *DC);
/// ActOnTagDefinitionError - Invoked when there was an unrecoverable
/// error parsing the definition of a tag.
void ActOnTagDefinitionError(Scope *S, Decl *TagDecl);
EnumConstantDecl *CheckEnumConstant(EnumDecl *Enum,
EnumConstantDecl *LastEnumConst,
SourceLocation IdLoc,
IdentifierInfo *Id,
Expr *val);
bool CheckEnumUnderlyingType(TypeSourceInfo *TI);
bool CheckEnumRedeclaration(SourceLocation EnumLoc, bool IsScoped,
QualType EnumUnderlyingTy, bool IsFixed,
const EnumDecl *Prev);
/// Determine whether the body of an anonymous enumeration should be skipped.
/// \param II The name of the first enumerator.
SkipBodyInfo shouldSkipAnonEnumBody(Scope *S, IdentifierInfo *II,
SourceLocation IILoc);
Decl *ActOnEnumConstant(Scope *S, Decl *EnumDecl, Decl *LastEnumConstant,
SourceLocation IdLoc, IdentifierInfo *Id,
const ParsedAttributesView &Attrs,
SourceLocation EqualLoc, Expr *Val);
void ActOnEnumBody(SourceLocation EnumLoc, SourceRange BraceRange,
Decl *EnumDecl, ArrayRef<Decl *> Elements, Scope *S,
const ParsedAttributesView &Attr);
/// Set the current declaration context until it gets popped.
void PushDeclContext(Scope *S, DeclContext *DC);
void PopDeclContext();
/// EnterDeclaratorContext - Used when we must lookup names in the context
/// of a declarator's nested name specifier.
void EnterDeclaratorContext(Scope *S, DeclContext *DC);
void ExitDeclaratorContext(Scope *S);
/// Enter a template parameter scope, after it's been associated with a
/// particular DeclContext. Causes lookup within the scope to chain through
/// enclosing contexts in the correct order.
void EnterTemplatedContext(Scope *S, DeclContext *DC);
/// Push the parameters of D, which must be a function, into scope.
void ActOnReenterFunctionContext(Scope* S, Decl* D);
void ActOnExitFunctionContext();
DeclContext *getFunctionLevelDeclContext();
/// getCurFunctionDecl - If inside of a function body, this returns a pointer
/// to the function decl for the function being parsed. If we're currently
/// in a 'block', this returns the containing context.
FunctionDecl *getCurFunctionDecl();
/// getCurMethodDecl - If inside of a method body, this returns a pointer to
/// the method decl for the method being parsed. If we're currently
/// in a 'block', this returns the containing context.
ObjCMethodDecl *getCurMethodDecl();
/// getCurFunctionOrMethodDecl - Return the Decl for the current ObjC method
/// or C function we're in, otherwise return null. If we're currently
/// in a 'block', this returns the containing context.
NamedDecl *getCurFunctionOrMethodDecl();
/// Add this decl to the scope shadowed decl chains.
void PushOnScopeChains(NamedDecl *D, Scope *S, bool AddToContext = true);
/// isDeclInScope - If 'Ctx' is a function/method, isDeclInScope returns true
/// if 'D' is in Scope 'S', otherwise 'S' is ignored and isDeclInScope returns
/// true if 'D' belongs to the given declaration context.
///
/// \param AllowInlineNamespace If \c true, allow the declaration to be in the
/// enclosing namespace set of the context, rather than contained
/// directly within it.
bool isDeclInScope(NamedDecl *D, DeclContext *Ctx, Scope *S = nullptr,
bool AllowInlineNamespace = false);
/// Finds the scope corresponding to the given decl context, if it
/// happens to be an enclosing scope. Otherwise return NULL.
static Scope *getScopeForDeclContext(Scope *S, DeclContext *DC);
/// Subroutines of ActOnDeclarator().
TypedefDecl *ParseTypedefDecl(Scope *S, Declarator &D, QualType T,
TypeSourceInfo *TInfo);
bool isIncompatibleTypedef(TypeDecl *Old, TypedefNameDecl *New);
/// Describes the kind of merge to perform for availability
/// attributes (including "deprecated", "unavailable", and "availability").
enum AvailabilityMergeKind {
/// Don't merge availability attributes at all.
AMK_None,
/// Merge availability attributes for a redeclaration, which requires
/// an exact match.
AMK_Redeclaration,
/// Merge availability attributes for an override, which requires
/// an exact match or a weakening of constraints.
AMK_Override,
/// Merge availability attributes for an implementation of
/// a protocol requirement.
AMK_ProtocolImplementation,
/// Merge availability attributes for an implementation of
/// an optional protocol requirement.
AMK_OptionalProtocolImplementation
};
/// Describes the kind of priority given to an availability attribute.
///
/// The sum of priorities determines the final priority of the attribute.
/// The final priority determines how the attribute will be merged.
/// An attribute with a lower priority will always remove higher priority
/// attributes for the specified platform when it is being applied. An
/// attribute with a higher priority will not be applied if the declaration
/// already has an availability attribute with a lower priority for the
/// specified platform. The final priority values are not expected to match
/// the values in this enumeration, but instead should be treated as plain
/// integer values. This enumeration just names the priority weights that are
/// used to calculate that final value. For example, an attribute applied via
/// '#pragma clang attribute' that was then inferred for another platform has
/// final priority 1 + 2 = 3.
enum AvailabilityPriority : int {
/// The availability attribute was specified explicitly next to the
/// declaration.
AP_Explicit = 0,
/// The availability attribute was applied using '#pragma clang attribute'.
AP_PragmaClangAttribute = 1,
/// The availability attribute for a specific platform was inferred from
/// an availability attribute for another platform.
AP_InferredFromOtherPlatform = 2
};
/// Attribute merging methods. Return true if a new attribute was added.
AvailabilityAttr *
mergeAvailabilityAttr(NamedDecl *D, const AttributeCommonInfo &CI,
IdentifierInfo *Platform, bool Implicit,
VersionTuple Introduced, VersionTuple Deprecated,
VersionTuple Obsoleted, bool IsUnavailable,
StringRef Message, bool IsStrict, StringRef Replacement,
AvailabilityMergeKind AMK, int Priority);
TypeVisibilityAttr *
mergeTypeVisibilityAttr(Decl *D, const AttributeCommonInfo &CI,
TypeVisibilityAttr::VisibilityType Vis);
VisibilityAttr *mergeVisibilityAttr(Decl *D, const AttributeCommonInfo &CI,
VisibilityAttr::VisibilityType Vis);
UuidAttr *mergeUuidAttr(Decl *D, const AttributeCommonInfo &CI,
StringRef UuidAsWritten, MSGuidDecl *GuidDecl);
DLLImportAttr *mergeDLLImportAttr(Decl *D, const AttributeCommonInfo &CI);
DLLExportAttr *mergeDLLExportAttr(Decl *D, const AttributeCommonInfo &CI);
MSInheritanceAttr *mergeMSInheritanceAttr(Decl *D,
const AttributeCommonInfo &CI,
bool BestCase,
MSInheritanceModel Model);
ErrorAttr *mergeErrorAttr(Decl *D, const AttributeCommonInfo &CI,
StringRef NewUserDiagnostic);
FormatAttr *mergeFormatAttr(Decl *D, const AttributeCommonInfo &CI,
IdentifierInfo *Format, int FormatIdx,
int FirstArg);
SectionAttr *mergeSectionAttr(Decl *D, const AttributeCommonInfo &CI,
StringRef Name);
CodeSegAttr *mergeCodeSegAttr(Decl *D, const AttributeCommonInfo &CI,
StringRef Name);
AlwaysInlineAttr *mergeAlwaysInlineAttr(Decl *D,
const AttributeCommonInfo &CI,
const IdentifierInfo *Ident);
MinSizeAttr *mergeMinSizeAttr(Decl *D, const AttributeCommonInfo &CI);
SwiftNameAttr *mergeSwiftNameAttr(Decl *D, const SwiftNameAttr &SNA,
StringRef Name);
OptimizeNoneAttr *mergeOptimizeNoneAttr(Decl *D,
const AttributeCommonInfo &CI);
InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D, const ParsedAttr &AL);
InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D,
const InternalLinkageAttr &AL);
WebAssemblyImportNameAttr *mergeImportNameAttr(
Decl *D, const WebAssemblyImportNameAttr &AL);
WebAssemblyImportModuleAttr *mergeImportModuleAttr(
Decl *D, const WebAssemblyImportModuleAttr &AL);
EnforceTCBAttr *mergeEnforceTCBAttr(Decl *D, const EnforceTCBAttr &AL);
EnforceTCBLeafAttr *mergeEnforceTCBLeafAttr(Decl *D,
const EnforceTCBLeafAttr &AL);
BTFDeclTagAttr *mergeBTFDeclTagAttr(Decl *D, const BTFDeclTagAttr &AL);
void mergeDeclAttributes(NamedDecl *New, Decl *Old,
AvailabilityMergeKind AMK = AMK_Redeclaration);
void MergeTypedefNameDecl(Scope *S, TypedefNameDecl *New,
LookupResult &OldDecls);
bool MergeFunctionDecl(FunctionDecl *New, NamedDecl *&Old, Scope *S,
bool MergeTypeWithOld);
bool MergeCompatibleFunctionDecls(FunctionDecl *New, FunctionDecl *Old,
Scope *S, bool MergeTypeWithOld);
void mergeObjCMethodDecls(ObjCMethodDecl *New, ObjCMethodDecl *Old);
void MergeVarDecl(VarDecl *New, LookupResult &Previous);
void MergeVarDeclTypes(VarDecl *New, VarDecl *Old, bool MergeTypeWithOld);
void MergeVarDeclExceptionSpecs(VarDecl *New, VarDecl *Old);
bool checkVarDeclRedefinition(VarDecl *OldDefn, VarDecl *NewDefn);
void notePreviousDefinition(const NamedDecl *Old, SourceLocation New);
bool MergeCXXFunctionDecl(FunctionDecl *New, FunctionDecl *Old, Scope *S);
// AssignmentAction - This is used by all the assignment diagnostic functions
// to represent what is actually causing the operation.
enum AssignmentAction {
AA_Assigning,
AA_Passing,
AA_Returning,
AA_Converting,
AA_Initializing,
AA_Sending,
AA_Casting,
AA_Passing_CFAudited
};
/// C++ Overloading.
enum OverloadKind {
/// This is a legitimate overload: the existing declarations are
/// functions or function templates with different signatures.
Ovl_Overload,
/// This is not an overload because the signature exactly matches
/// an existing declaration.
Ovl_Match,
/// This is not an overload because the lookup results contain a
/// non-function.
Ovl_NonFunction
};
OverloadKind CheckOverload(Scope *S,
FunctionDecl *New,
const LookupResult &OldDecls,
NamedDecl *&OldDecl,
bool IsForUsingDecl);
bool IsOverload(FunctionDecl *New, FunctionDecl *Old, bool IsForUsingDecl,
bool ConsiderCudaAttrs = true,
bool ConsiderRequiresClauses = true);
enum class AllowedExplicit {
/// Allow no explicit functions to be used.
None,
/// Allow explicit conversion functions but not explicit constructors.
Conversions,
/// Allow both explicit conversion functions and explicit constructors.
All
};
ImplicitConversionSequence
TryImplicitConversion(Expr *From, QualType ToType,
bool SuppressUserConversions,
AllowedExplicit AllowExplicit,
bool InOverloadResolution,
bool CStyle,
bool AllowObjCWritebackConversion);
bool IsIntegralPromotion(Expr *From, QualType FromType, QualType ToType);
bool IsFloatingPointPromotion(QualType FromType, QualType ToType);
bool IsComplexPromotion(QualType FromType, QualType ToType);
bool IsPointerConversion(Expr *From, QualType FromType, QualType ToType,
bool InOverloadResolution,
QualType& ConvertedType, bool &IncompatibleObjC);
bool isObjCPointerConversion(QualType FromType, QualType ToType,
QualType& ConvertedType, bool &IncompatibleObjC);
bool isObjCWritebackConversion(QualType FromType, QualType ToType,
QualType &ConvertedType);
bool IsBlockPointerConversion(QualType FromType, QualType ToType,
QualType& ConvertedType);
bool FunctionParamTypesAreEqual(const FunctionProtoType *OldType,
const FunctionProtoType *NewType,
unsigned *ArgPos = nullptr);
void HandleFunctionTypeMismatch(PartialDiagnostic &PDiag,
QualType FromType, QualType ToType);
void maybeExtendBlockObject(ExprResult &E);
CastKind PrepareCastToObjCObjectPointer(ExprResult &E);
bool CheckPointerConversion(Expr *From, QualType ToType,
CastKind &Kind,
CXXCastPath& BasePath,
bool IgnoreBaseAccess,
bool Diagnose = true);
bool IsMemberPointerConversion(Expr *From, QualType FromType, QualType ToType,
bool InOverloadResolution,
QualType &ConvertedType);
bool CheckMemberPointerConversion(Expr *From, QualType ToType,
CastKind &Kind,
CXXCastPath &BasePath,
bool IgnoreBaseAccess);
bool IsQualificationConversion(QualType FromType, QualType ToType,
bool CStyle, bool &ObjCLifetimeConversion);
bool IsFunctionConversion(QualType FromType, QualType ToType,
QualType &ResultTy);
bool DiagnoseMultipleUserDefinedConversion(Expr *From, QualType ToType);
bool isSameOrCompatibleFunctionType(QualType Param, QualType Arg);
bool CanPerformAggregateInitializationForOverloadResolution(
const InitializedEntity &Entity, InitListExpr *From);
bool IsStringInit(Expr *Init, const ArrayType *AT);
bool CanPerformCopyInitialization(const InitializedEntity &Entity,
ExprResult Init);
ExprResult PerformCopyInitialization(const InitializedEntity &Entity,
SourceLocation EqualLoc,
ExprResult Init,
bool TopLevelOfInitList = false,
bool AllowExplicit = false);
ExprResult PerformObjectArgumentInitialization(Expr *From,
NestedNameSpecifier *Qualifier,
NamedDecl *FoundDecl,
CXXMethodDecl *Method);
/// Check that the lifetime of the initializer (and its subobjects) is
/// sufficient for initializing the entity, and perform lifetime extension
/// (when permitted) if not.
void checkInitializerLifetime(const InitializedEntity &Entity, Expr *Init);
ExprResult PerformContextuallyConvertToBool(Expr *From);
ExprResult PerformContextuallyConvertToObjCPointer(Expr *From);
/// Contexts in which a converted constant expression is required.
enum CCEKind {
CCEK_CaseValue, ///< Expression in a case label.
CCEK_Enumerator, ///< Enumerator value with fixed underlying type.
CCEK_TemplateArg, ///< Value of a non-type template parameter.
CCEK_ArrayBound, ///< Array bound in array declarator or new-expression.
CCEK_ExplicitBool, ///< Condition in an explicit(bool) specifier.
CCEK_Noexcept ///< Condition in a noexcept(bool) specifier.
};
ExprResult CheckConvertedConstantExpression(Expr *From, QualType T,
llvm::APSInt &Value, CCEKind CCE);
ExprResult CheckConvertedConstantExpression(Expr *From, QualType T,
APValue &Value, CCEKind CCE,
NamedDecl *Dest = nullptr);
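// Illustrative sketch: checking a case label as a converted constant
// expression of the switch condition's type ('CaseExpr' and 'CondType' are
// hypothetical):
//
//   llvm::APSInt CaseVal;
//   ExprResult Converted = CheckConvertedConstantExpression(
//       CaseExpr, CondType, CaseVal, CCEK_CaseValue);
//   if (Converted.isInvalid())
//     return; // conversion failed; bail out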
/// Abstract base class used to perform a contextual implicit
/// conversion from an expression to any type passing a filter.
class ContextualImplicitConverter {
public:
bool Suppress;
bool SuppressConversion;
ContextualImplicitConverter(bool Suppress = false,
bool SuppressConversion = false)
: Suppress(Suppress), SuppressConversion(SuppressConversion) {}
/// Determine whether the specified type is a valid destination type
/// for this conversion.
virtual bool match(QualType T) = 0;
/// Emits a diagnostic complaining that the expression does not have
/// integral or enumeration type.
virtual SemaDiagnosticBuilder
diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) = 0;
/// Emits a diagnostic when the expression has incomplete class type.
virtual SemaDiagnosticBuilder
diagnoseIncomplete(Sema &S, SourceLocation Loc, QualType T) = 0;
/// Emits a diagnostic when the only matching conversion function
/// is explicit.
virtual SemaDiagnosticBuilder diagnoseExplicitConv(
Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0;
/// Emits a note for the explicit conversion function.
virtual SemaDiagnosticBuilder
noteExplicitConv(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0;
/// Emits a diagnostic when there are multiple possible conversion
/// functions.
virtual SemaDiagnosticBuilder
diagnoseAmbiguous(Sema &S, SourceLocation Loc, QualType T) = 0;
/// Emits a note for one of the candidate conversions.
virtual SemaDiagnosticBuilder
noteAmbiguous(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0;
/// Emits a diagnostic when we picked a conversion function
/// (for cases when we are not allowed to pick a conversion function).
virtual SemaDiagnosticBuilder diagnoseConversion(
Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0;
virtual ~ContextualImplicitConverter() {}
};
class ICEConvertDiagnoser : public ContextualImplicitConverter {
bool AllowScopedEnumerations;
public:
ICEConvertDiagnoser(bool AllowScopedEnumerations,
bool Suppress, bool SuppressConversion)
: ContextualImplicitConverter(Suppress, SuppressConversion),
AllowScopedEnumerations(AllowScopedEnumerations) {}
/// Match an integral or (possibly scoped) enumeration type.
bool match(QualType T) override;
SemaDiagnosticBuilder
diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) override {
return diagnoseNotInt(S, Loc, T);
}
/// Emits a diagnostic complaining that the expression does not have
/// integral or enumeration type.
virtual SemaDiagnosticBuilder
diagnoseNotInt(Sema &S, SourceLocation Loc, QualType T) = 0;
};
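// Illustrative sketch: callers typically define a small local diagnoser and
// hand it to PerformContextualImplicitConversion below. Only one override is
// shown; a real subclass must implement every pure virtual, and 'SomeDiagID'
// stands in for a real diagnostic ID.
//
//   struct MyDiagnoser : ICEConvertDiagnoser {
//     MyDiagnoser()
//         : ICEConvertDiagnoser(/*AllowScopedEnumerations=*/false,
//                               /*Suppress=*/false,
//                               /*SuppressConversion=*/false) {}
//     SemaDiagnosticBuilder diagnoseNotInt(Sema &S, SourceLocation Loc,
//                                          QualType T) override {
//       return S.Diag(Loc, SomeDiagID) << T;
//     }
//     // ... diagnoseIncomplete, diagnoseExplicitConv, noteExplicitConv,
//     //     diagnoseAmbiguous, noteAmbiguous, diagnoseConversion ...
//   };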
/// Perform a contextual implicit conversion.
ExprResult PerformContextualImplicitConversion(
SourceLocation Loc, Expr *FromE, ContextualImplicitConverter &Converter);
enum ObjCSubscriptKind {
OS_Array,
OS_Dictionary,
OS_Error
};
ObjCSubscriptKind CheckSubscriptingKind(Expr *FromE);
// Note that LK_String is intentionally after the other literals, as
// this is used for diagnostics logic.
enum ObjCLiteralKind {
LK_Array,
LK_Dictionary,
LK_Numeric,
LK_Boxed,
LK_String,
LK_Block,
LK_None
};
ObjCLiteralKind CheckLiteralKind(Expr *FromE);
ExprResult PerformObjectMemberConversion(Expr *From,
NestedNameSpecifier *Qualifier,
NamedDecl *FoundDecl,
NamedDecl *Member);
// Members have to be NamespaceDecl* or TranslationUnitDecl*.
// TODO: make this a typesafe union.
typedef llvm::SmallSetVector<DeclContext *, 16> AssociatedNamespaceSet;
typedef llvm::SmallSetVector<CXXRecordDecl *, 16> AssociatedClassSet;
using ADLCallKind = CallExpr::ADLCallKind;
void AddOverloadCandidate(FunctionDecl *Function, DeclAccessPair FoundDecl,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
bool AllowExplicit = true,
bool AllowExplicitConversion = false,
ADLCallKind IsADLCandidate = ADLCallKind::NotADL,
ConversionSequenceList EarlyConversions = None,
OverloadCandidateParamOrder PO = {});
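// Illustrative sketch of the common call-resolution pattern built around
// AddOverloadCandidate and its siblings ('CallLoc', 'Lookup', and 'Args' are
// hypothetical):
//
//   OverloadCandidateSet CandidateSet(CallLoc,
//                                     OverloadCandidateSet::CSK_Normal);
//   for (NamedDecl *Callee : Lookup)
//     if (auto *FD = dyn_cast<FunctionDecl>(Callee))
//       AddOverloadCandidate(FD, DeclAccessPair::make(FD, AS_public), Args,
//                            CandidateSet);
//   OverloadCandidateSet::iterator Best;
//   if (CandidateSet.BestViableFunction(*this, CallLoc, Best) == OR_Success) {
//     FunctionDecl *Winner = Best->Function;
//     // ... build the call against Winner ...
//   }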
void AddFunctionCandidates(const UnresolvedSetImpl &Functions,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
bool FirstArgumentIsBase = false);
void AddMethodCandidate(DeclAccessPair FoundDecl,
QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversion = false,
OverloadCandidateParamOrder PO = {});
void AddMethodCandidate(CXXMethodDecl *Method,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext, QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
ConversionSequenceList EarlyConversions = None,
OverloadCandidateParamOrder PO = {});
void AddMethodTemplateCandidate(FunctionTemplateDecl *MethodTmpl,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext,
TemplateArgumentListInfo *ExplicitTemplateArgs,
QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
OverloadCandidateParamOrder PO = {});
void AddTemplateOverloadCandidate(
FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl,
TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet, bool SuppressUserConversions = false,
bool PartialOverloading = false, bool AllowExplicit = true,
ADLCallKind IsADLCandidate = ADLCallKind::NotADL,
OverloadCandidateParamOrder PO = {});
bool CheckNonDependentConversions(
FunctionTemplateDecl *FunctionTemplate, ArrayRef<QualType> ParamTypes,
ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet,
ConversionSequenceList &Conversions, bool SuppressUserConversions,
CXXRecordDecl *ActingContext = nullptr, QualType ObjectType = QualType(),
Expr::Classification ObjectClassification = {},
OverloadCandidateParamOrder PO = {});
void AddConversionCandidate(
CXXConversionDecl *Conversion, DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext, Expr *From, QualType ToType,
OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit,
bool AllowExplicit, bool AllowResultConversion = true);
void AddTemplateConversionCandidate(
FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext, Expr *From, QualType ToType,
OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit,
bool AllowExplicit, bool AllowResultConversion = true);
void AddSurrogateCandidate(CXXConversionDecl *Conversion,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext,
const FunctionProtoType *Proto,
Expr *Object, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet);
void AddNonMemberOperatorCandidates(
const UnresolvedSetImpl &Functions, ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr);
void AddMemberOperatorCandidates(OverloadedOperatorKind Op,
SourceLocation OpLoc, ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
OverloadCandidateParamOrder PO = {});
void AddBuiltinCandidate(QualType *ParamTys, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool IsAssignmentOperator = false,
unsigned NumContextualBoolArguments = 0);
void AddBuiltinOperatorCandidates(OverloadedOperatorKind Op,
SourceLocation OpLoc, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet);
void AddArgumentDependentLookupCandidates(DeclarationName Name,
SourceLocation Loc,
ArrayRef<Expr *> Args,
TemplateArgumentListInfo *ExplicitTemplateArgs,
OverloadCandidateSet& CandidateSet,
bool PartialOverloading = false);
// Emit as a 'note' the specific overload candidate
void NoteOverloadCandidate(
NamedDecl *Found, FunctionDecl *Fn,
OverloadCandidateRewriteKind RewriteKind = OverloadCandidateRewriteKind(),
QualType DestType = QualType(), bool TakingAddress = false);
// Emit as a series of 'note's all template and non-template overload
// candidates identified by the expression Expr.
void NoteAllOverloadCandidates(Expr *E, QualType DestType = QualType(),
bool TakingAddress = false);
/// Check the enable_if expressions on the given function. Returns the first
/// failing attribute, or NULL if they were all successful.
EnableIfAttr *CheckEnableIf(FunctionDecl *Function, SourceLocation CallLoc,
ArrayRef<Expr *> Args,
bool MissingImplicitThis = false);
/// Find the failed Boolean condition within a given Boolean
/// constant expression, and describe it with a string.
std::pair<Expr *, std::string> findFailedBooleanCondition(Expr *Cond);
/// Emit diagnostics for the diagnose_if attributes on Function, ignoring any
/// non-ArgDependent DiagnoseIfAttrs.
///
/// Argument-dependent diagnose_if attributes should be checked each time a
/// function is used as a direct callee of a function call.
///
/// Returns true if any errors were emitted.
bool diagnoseArgDependentDiagnoseIfAttrs(const FunctionDecl *Function,
const Expr *ThisArg,
ArrayRef<const Expr *> Args,
SourceLocation Loc);
/// Emit diagnostics for the diagnose_if attributes on Function, ignoring any
/// ArgDependent DiagnoseIfAttrs.
///
/// Argument-independent diagnose_if attributes should be checked on every use
/// of a function.
///
/// Returns true if any errors were emitted.
bool diagnoseArgIndependentDiagnoseIfAttrs(const NamedDecl *ND,
SourceLocation Loc);
/// Returns whether the given function's address can be taken or not,
/// optionally emitting a diagnostic if the address can't be taken.
///
/// Returns false if taking the address of the function is illegal.
bool checkAddressOfFunctionIsAvailable(const FunctionDecl *Function,
bool Complain = false,
SourceLocation Loc = SourceLocation());
// [PossiblyAFunctionType] --> [Return]
// NonFunctionType --> NonFunctionType
// R (A) --> R(A)
// R (*)(A) --> R (A)
// R (&)(A) --> R (A)
// R (S::*)(A) --> R (A)
QualType ExtractUnqualifiedFunctionType(QualType PossiblyAFunctionType);
FunctionDecl *
ResolveAddressOfOverloadedFunction(Expr *AddressOfExpr,
QualType TargetType,
bool Complain,
DeclAccessPair &Found,
bool *pHadMultipleCandidates = nullptr);
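// Illustrative example of the resolution performed above: given
//
//   void g(int);
//   void g(double);
//   void (*p)(int) = &g; // AddressOfExpr is '&g'
//
// resolving against TargetType 'void (*)(int)' yields the 'g(int)'
// declaration.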
FunctionDecl *
resolveAddressOfSingleOverloadCandidate(Expr *E, DeclAccessPair &FoundResult);
bool resolveAndFixAddressOfSingleOverloadCandidate(
ExprResult &SrcExpr, bool DoFunctionPointerConversion = false);
FunctionDecl *
ResolveSingleFunctionTemplateSpecialization(OverloadExpr *ovl,
bool Complain = false,
DeclAccessPair *Found = nullptr);
bool ResolveAndFixSingleFunctionTemplateSpecialization(
ExprResult &SrcExpr,
bool DoFunctionPointerConversion = false,
bool Complain = false,
SourceRange OpRangeForComplaining = SourceRange(),
QualType DestTypeForComplaining = QualType(),
unsigned DiagIDForComplaining = 0);
Expr *FixOverloadedFunctionReference(Expr *E,
DeclAccessPair FoundDecl,
FunctionDecl *Fn);
ExprResult FixOverloadedFunctionReference(ExprResult,
DeclAccessPair FoundDecl,
FunctionDecl *Fn);
void AddOverloadedCallCandidates(UnresolvedLookupExpr *ULE,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
bool PartialOverloading = false);
void AddOverloadedCallCandidates(
LookupResult &R, TemplateArgumentListInfo *ExplicitTemplateArgs,
ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet);
// An enum used to represent the different possible results of building a
// range-based for loop.
enum ForRangeStatus {
FRS_Success,
FRS_NoViableFunction,
FRS_DiagnosticIssued
};
ForRangeStatus BuildForRangeBeginEndCall(SourceLocation Loc,
SourceLocation RangeLoc,
const DeclarationNameInfo &NameInfo,
LookupResult &MemberLookup,
OverloadCandidateSet *CandidateSet,
Expr *Range, ExprResult *CallExpr);
ExprResult BuildOverloadedCallExpr(Scope *S, Expr *Fn,
UnresolvedLookupExpr *ULE,
SourceLocation LParenLoc,
MultiExprArg Args,
SourceLocation RParenLoc,
Expr *ExecConfig,
bool AllowTypoCorrection=true,
bool CalleesAddressIsTaken=false);
bool buildOverloadedCallSet(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE,
MultiExprArg Args, SourceLocation RParenLoc,
OverloadCandidateSet *CandidateSet,
ExprResult *Result);
ExprResult CreateUnresolvedLookupExpr(CXXRecordDecl *NamingClass,
NestedNameSpecifierLoc NNSLoc,
DeclarationNameInfo DNI,
const UnresolvedSetImpl &Fns,
bool PerformADL = true);
ExprResult CreateOverloadedUnaryOp(SourceLocation OpLoc,
UnaryOperatorKind Opc,
const UnresolvedSetImpl &Fns,
Expr *input, bool RequiresADL = true);
void LookupOverloadedBinOp(OverloadCandidateSet &CandidateSet,
OverloadedOperatorKind Op,
const UnresolvedSetImpl &Fns,
ArrayRef<Expr *> Args, bool RequiresADL = true);
ExprResult CreateOverloadedBinOp(SourceLocation OpLoc,
BinaryOperatorKind Opc,
const UnresolvedSetImpl &Fns,
Expr *LHS, Expr *RHS,
bool RequiresADL = true,
bool AllowRewrittenCandidates = true,
FunctionDecl *DefaultedFn = nullptr);
ExprResult BuildSynthesizedThreeWayComparison(SourceLocation OpLoc,
const UnresolvedSetImpl &Fns,
Expr *LHS, Expr *RHS,
FunctionDecl *DefaultedFn);
ExprResult CreateOverloadedArraySubscriptExpr(SourceLocation LLoc,
SourceLocation RLoc,
Expr *Base,Expr *Idx);
ExprResult BuildCallToMemberFunction(Scope *S, Expr *MemExpr,
SourceLocation LParenLoc,
MultiExprArg Args,
SourceLocation RParenLoc,
Expr *ExecConfig = nullptr,
bool IsExecConfig = false,
bool AllowRecovery = false);
ExprResult
BuildCallToObjectOfClassType(Scope *S, Expr *Object, SourceLocation LParenLoc,
MultiExprArg Args,
SourceLocation RParenLoc);
ExprResult BuildOverloadedArrowExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
bool *NoArrowOperatorFound = nullptr);
/// CheckCallReturnType - Checks that a call expression's return type is
/// complete. Returns true on failure. The location passed in is the location
/// that best represents the call.
bool CheckCallReturnType(QualType ReturnType, SourceLocation Loc,
CallExpr *CE, FunctionDecl *FD);
/// Helpers for dealing with blocks and functions.
bool CheckParmsForFunctionDef(ArrayRef<ParmVarDecl *> Parameters,
bool CheckParameterNames);
void CheckCXXDefaultArguments(FunctionDecl *FD);
void CheckExtraCXXDefaultArguments(Declarator &D);
Scope *getNonFieldDeclScope(Scope *S);
/// \name Name lookup
///
/// These routines provide name lookup that is used during semantic
/// analysis to resolve the various kinds of names (identifiers,
/// overloaded operator names, constructor names, etc.) into zero or
/// more declarations within a particular scope. The major entry
/// points are LookupName, which performs unqualified name lookup,
/// and LookupQualifiedName, which performs qualified name lookup.
///
/// All name lookup is performed based on some specific criteria,
/// which specify what names will be visible to name lookup and how
/// far name lookup should work. These criteria are important both
/// for capturing language semantics (certain lookups will ignore
/// certain names, for example) and for performance, since name
/// lookup is often a bottleneck in the compilation of C++. Name
/// lookup criteria are specified via the LookupCriteria enumeration.
///
/// The results of name lookup can vary based on the kind of name
/// lookup performed, the current language, and the translation
/// unit. In C, for example, name lookup will either return nothing
/// (no entity found) or a single declaration. In C++, name lookup
/// can additionally refer to a set of overloaded functions or
/// result in an ambiguity. All of the possible results of name
/// lookup are captured by the LookupResult class, which provides
/// the ability to distinguish among them.
//@{
/// Describes the kind of name lookup to perform.
enum LookupNameKind {
/// Ordinary name lookup, which finds ordinary names (functions,
/// variables, typedefs, etc.) in C and most kinds of names
/// (functions, variables, members, types, etc.) in C++.
LookupOrdinaryName = 0,
/// Tag name lookup, which finds the names of enums, classes,
/// structs, and unions.
LookupTagName,
/// Label name lookup.
LookupLabel,
/// Member name lookup, which finds the names of
/// class/struct/union members.
LookupMemberName,
/// Look up of an operator name (e.g., operator+) for use with
/// operator overloading. This lookup is similar to ordinary name
/// lookup, but will ignore any declarations that are class members.
LookupOperatorName,
/// Look up a name following ~ in a destructor name. This is an ordinary
/// lookup, but prefers tags to typedefs.
LookupDestructorName,
/// Look up of a name that precedes the '::' scope resolution
/// operator in C++. This lookup completely ignores operator, object,
/// function, and enumerator names (C++ [basic.lookup.qual]p1).
LookupNestedNameSpecifierName,
/// Look up a namespace name within a C++ using directive or
/// namespace alias definition, ignoring non-namespace names (C++
/// [basic.lookup.udir]p1).
LookupNamespaceName,
/// Look up all declarations in a scope with the given name,
/// including resolved using declarations. This is appropriate
/// for checking redeclarations for a using declaration.
LookupUsingDeclName,
/// Look up an ordinary name that is going to be redeclared as a
/// name with linkage. This lookup ignores any declarations that
/// are outside of the current scope unless they have linkage. See
/// C99 6.2.2p4-5 and C++ [basic.link]p6.
LookupRedeclarationWithLinkage,
/// Look up a friend of a local class. This lookup does not look
/// outside the innermost non-class scope. See C++11 [class.friend]p11.
LookupLocalFriendName,
/// Look up the name of an Objective-C protocol.
LookupObjCProtocolName,
/// Look up implicit 'self' parameter of an objective-c method.
LookupObjCImplicitSelfParam,
/// Look up the name of an OpenMP user-defined reduction operation.
LookupOMPReductionName,
/// Look up the name of an OpenMP user-defined mapper.
LookupOMPMapperName,
/// Look up any declaration with any name.
LookupAnyName
};
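// Illustrative sketch: the usual unqualified-lookup pattern built on these
// kinds ('useVariable' is a hypothetical consumer):
//
//   LookupResult R(*this, Name, NameLoc, LookupOrdinaryName);
//   if (LookupName(R, S) && !R.isAmbiguous())
//     if (auto *VD = R.getAsSingle<VarDecl>())
//       useVariable(VD);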
/// Specifies whether (or how) name lookup is being performed for a
/// redeclaration (vs. a reference).
enum RedeclarationKind {
/// The lookup is a reference to this name that is not for the
/// purpose of redeclaring the name.
NotForRedeclaration = 0,
/// The lookup results will be used for redeclaration of a name,
/// if an entity by that name already exists and is visible.
ForVisibleRedeclaration,
/// The lookup results will be used for redeclaration of a name
/// with external linkage; non-visible lookup results with external linkage
/// may also be found.
ForExternalRedeclaration
};
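/// A typical redeclaration lookup, as a minimal illustrative sketch (the
/// surrounding names are hypothetical):
/// \code
///   LookupResult Previous(SemaRef, Name, NameLoc, Sema::LookupOrdinaryName,
///                         SemaRef.forRedeclarationInCurContext());
///   SemaRef.LookupName(Previous, S);
/// \endcode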
RedeclarationKind forRedeclarationInCurContext() {
// A declaration with an owning module for linkage can never link against
// anything that is not visible. We don't need to check linkage here; if
// the context has internal linkage, redeclaration lookup won't find things
// from other TUs, and we can't safely compute linkage yet in general.
if (cast<Decl>(CurContext)
->getOwningModuleForLinkage(/*IgnoreLinkage*/true))
return ForVisibleRedeclaration;
return ForExternalRedeclaration;
}
/// The possible outcomes of name lookup for a literal operator.
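///
/// For orientation, an illustrative (non-normative) mapping for the integer
/// literal 123_km:
/// \code
///   LOLR_Cooked   matches  T operator""_km(unsigned long long);
///   LOLR_Raw      matches  T operator""_km(const char *);
///   LOLR_Template matches  template <char...> T operator""_km();
/// \endcode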
enum LiteralOperatorLookupResult {
/// The lookup resulted in an error.
LOLR_Error,
/// The lookup found no match but no diagnostic was issued.
LOLR_ErrorNoDiagnostic,
/// The lookup found a single 'cooked' literal operator, which
/// expects a normal literal to be built and passed to it.
LOLR_Cooked,
/// The lookup found a single 'raw' literal operator, which expects
/// a string literal containing the spelling of the literal token.
LOLR_Raw,
/// The lookup found an overload set of literal operator templates,
/// which expect the characters of the spelling of the literal token to be
/// passed as a non-type template argument pack.
LOLR_Template,
/// The lookup found an overload set of literal operator templates,
/// which expect the character type and characters of the spelling of the
/// string literal token to be passed as template arguments.
LOLR_StringTemplatePack,
};
SpecialMemberOverloadResult LookupSpecialMember(CXXRecordDecl *D,
CXXSpecialMember SM,
bool ConstArg,
bool VolatileArg,
bool RValueThis,
bool ConstThis,
bool VolatileThis);
typedef std::function<void(const TypoCorrection &)> TypoDiagnosticGenerator;
typedef std::function<ExprResult(Sema &, TypoExpr *, TypoCorrection)>
TypoRecoveryCallback;
private:
bool CppLookupName(LookupResult &R, Scope *S);
struct TypoExprState {
std::unique_ptr<TypoCorrectionConsumer> Consumer;
TypoDiagnosticGenerator DiagHandler;
TypoRecoveryCallback RecoveryHandler;
TypoExprState();
TypoExprState(TypoExprState &&other) noexcept;
TypoExprState &operator=(TypoExprState &&other) noexcept;
};
/// The set of unhandled TypoExprs and their associated state.
llvm::MapVector<TypoExpr *, TypoExprState> DelayedTypos;
/// Creates a new TypoExpr AST node.
TypoExpr *createDelayedTypo(std::unique_ptr<TypoCorrectionConsumer> TCC,
TypoDiagnosticGenerator TDG,
TypoRecoveryCallback TRC, SourceLocation TypoLoc);
// The set of known/encountered (unique, canonicalized) NamespaceDecls.
//
// The boolean value will be true to indicate that the namespace was loaded
// from an AST/PCH file, or false otherwise.
llvm::MapVector<NamespaceDecl*, bool> KnownNamespaces;
/// Whether we have already loaded known namespaces from an external
/// source.
bool LoadedExternalKnownNamespaces;
/// Helper for CorrectTypo and CorrectTypoDelayed used to create and
/// populate a new TypoCorrectionConsumer. Returns nullptr if typo correction
/// should be skipped entirely.
std::unique_ptr<TypoCorrectionConsumer>
makeTypoCorrectionConsumer(const DeclarationNameInfo &Typo,
Sema::LookupNameKind LookupKind, Scope *S,
CXXScopeSpec *SS,
CorrectionCandidateCallback &CCC,
DeclContext *MemberContext, bool EnteringContext,
const ObjCObjectPointerType *OPT,
bool ErrorRecovery);
public:
const TypoExprState &getTypoExprState(TypoExpr *TE) const;
/// Clears the state of the given TypoExpr.
void clearDelayedTypo(TypoExpr *TE);
/// Look up a name, looking for a single declaration. Return
/// null if the results were absent, ambiguous, or overloaded.
///
/// It is preferable to use the elaborated form and explicitly handle
/// ambiguity and overload sets.
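///
/// A minimal sketch of a typical call (caller-side names are assumptions):
/// \code
///   NamedDecl *ND = SemaRef.LookupSingleName(CurScope, Name, NameLoc,
///                                            Sema::LookupOrdinaryName);
///   if (!ND)
///     return; // absent, ambiguous, or overloaded
/// \endcode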
NamedDecl *LookupSingleName(Scope *S, DeclarationName Name,
SourceLocation Loc,
LookupNameKind NameKind,
RedeclarationKind Redecl
= NotForRedeclaration);
bool LookupBuiltin(LookupResult &R);
void LookupNecessaryTypesForBuiltin(Scope *S, unsigned ID);
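/// Perform unqualified name lookup starting in the given scope. A minimal
/// illustrative sketch of driving a lookup (caller-side names are
/// hypothetical):
/// \code
///   LookupResult R(SemaRef, Name, NameLoc, Sema::LookupOrdinaryName);
///   if (SemaRef.LookupName(R, CurScope) && R.isSingleResult()) {
///     NamedDecl *Found = R.getFoundDecl();
///     // ...
///   }
/// \endcode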
bool LookupName(LookupResult &R, Scope *S,
bool AllowBuiltinCreation = false);
bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
bool InUnqualifiedLookup = false);
bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
CXXScopeSpec &SS);
bool LookupParsedName(LookupResult &R, Scope *S, CXXScopeSpec *SS,
bool AllowBuiltinCreation = false,
bool EnteringContext = false);
ObjCProtocolDecl *LookupProtocol(IdentifierInfo *II, SourceLocation IdLoc,
RedeclarationKind Redecl
= NotForRedeclaration);
bool LookupInSuper(LookupResult &R, CXXRecordDecl *Class);
void LookupOverloadedOperatorName(OverloadedOperatorKind Op, Scope *S,
UnresolvedSetImpl &Functions);
LabelDecl *LookupOrCreateLabel(IdentifierInfo *II, SourceLocation IdentLoc,
SourceLocation GnuLabelLoc = SourceLocation());
DeclContextLookupResult LookupConstructors(CXXRecordDecl *Class);
CXXConstructorDecl *LookupDefaultConstructor(CXXRecordDecl *Class);
CXXConstructorDecl *LookupCopyingConstructor(CXXRecordDecl *Class,
unsigned Quals);
CXXMethodDecl *LookupCopyingAssignment(CXXRecordDecl *Class, unsigned Quals,
bool RValueThis, unsigned ThisQuals);
CXXConstructorDecl *LookupMovingConstructor(CXXRecordDecl *Class,
unsigned Quals);
CXXMethodDecl *LookupMovingAssignment(CXXRecordDecl *Class, unsigned Quals,
bool RValueThis, unsigned ThisQuals);
CXXDestructorDecl *LookupDestructor(CXXRecordDecl *Class);
bool checkLiteralOperatorId(const CXXScopeSpec &SS, const UnqualifiedId &Id,
bool IsUDSuffix);
LiteralOperatorLookupResult
LookupLiteralOperator(Scope *S, LookupResult &R, ArrayRef<QualType> ArgTys,
bool AllowRaw, bool AllowTemplate,
bool AllowStringTemplate, bool DiagnoseMissing,
StringLiteral *StringLit = nullptr);
bool isKnownName(StringRef name);
/// Status of a function's emission with respect to CUDA/HIP/OpenMP
/// host/device attributes.
enum class FunctionEmissionStatus {
Emitted,
CUDADiscarded, // Discarded due to CUDA/HIP hostness
OMPDiscarded, // Discarded due to OpenMP hostness
TemplateDiscarded, // Discarded due to uninstantiated templates
Unknown,
};
FunctionEmissionStatus getEmissionStatus(FunctionDecl *Decl,
bool Final = false);
DeviceDiagnosticReason getEmissionReason(const FunctionDecl *Decl);
// Whether the callee should be ignored in the CUDA/HIP/OpenMP host/device check.
bool shouldIgnoreInHostDeviceCheck(FunctionDecl *Callee);
void ArgumentDependentLookup(DeclarationName Name, SourceLocation Loc,
ArrayRef<Expr *> Args, ADLResult &Functions);
void LookupVisibleDecls(Scope *S, LookupNameKind Kind,
VisibleDeclConsumer &Consumer,
bool IncludeGlobalScope = true,
bool LoadExternal = true);
void LookupVisibleDecls(DeclContext *Ctx, LookupNameKind Kind,
VisibleDeclConsumer &Consumer,
bool IncludeGlobalScope = true,
bool IncludeDependentBases = false,
bool LoadExternal = true);
enum CorrectTypoKind {
CTK_NonError, // CorrectTypo used in a non error recovery situation.
CTK_ErrorRecovery // CorrectTypo used in normal error recovery.
};
TypoCorrection CorrectTypo(const DeclarationNameInfo &Typo,
Sema::LookupNameKind LookupKind,
Scope *S, CXXScopeSpec *SS,
CorrectionCandidateCallback &CCC,
CorrectTypoKind Mode,
DeclContext *MemberContext = nullptr,
bool EnteringContext = false,
const ObjCObjectPointerType *OPT = nullptr,
bool RecordFailure = true);
TypoExpr *CorrectTypoDelayed(const DeclarationNameInfo &Typo,
Sema::LookupNameKind LookupKind, Scope *S,
CXXScopeSpec *SS,
CorrectionCandidateCallback &CCC,
TypoDiagnosticGenerator TDG,
TypoRecoveryCallback TRC, CorrectTypoKind Mode,
DeclContext *MemberContext = nullptr,
bool EnteringContext = false,
const ObjCObjectPointerType *OPT = nullptr);
/// Process any TypoExprs in the given Expr and its children,
/// generating diagnostics as appropriate and returning a new Expr if there
/// were typos that were all successfully corrected and ExprError if one or
/// more typos could not be corrected.
///
/// \param E The Expr to check for TypoExprs.
///
/// \param InitDecl A VarDecl to avoid because the Expr being corrected is its
/// initializer.
///
/// \param RecoverUncorrectedTypos If true, when typo correction fails, it
/// will rebuild the given Expr with all TypoExprs degraded to RecoveryExprs.
///
/// \param Filter A function applied to a newly rebuilt Expr to determine if
/// it is an acceptable/usable result from a single combination of typo
/// corrections. As long as the filter returns ExprError, different
/// combinations of corrections will be tried until all are exhausted.
ExprResult CorrectDelayedTyposInExpr(
Expr *E, VarDecl *InitDecl = nullptr,
bool RecoverUncorrectedTypos = false,
llvm::function_ref<ExprResult(Expr *)> Filter =
[](Expr *E) -> ExprResult { return E; });
ExprResult CorrectDelayedTyposInExpr(
ExprResult ER, VarDecl *InitDecl = nullptr,
bool RecoverUncorrectedTypos = false,
llvm::function_ref<ExprResult(Expr *)> Filter =
[](Expr *E) -> ExprResult { return E; }) {
return ER.isInvalid()
? ER
: CorrectDelayedTyposInExpr(ER.get(), InitDecl,
RecoverUncorrectedTypos, Filter);
}
void diagnoseTypo(const TypoCorrection &Correction,
const PartialDiagnostic &TypoDiag,
bool ErrorRecovery = true);
void diagnoseTypo(const TypoCorrection &Correction,
const PartialDiagnostic &TypoDiag,
const PartialDiagnostic &PrevNote,
bool ErrorRecovery = true);
void MarkTypoCorrectedFunctionDefinition(const NamedDecl *F);
void FindAssociatedClassesAndNamespaces(SourceLocation InstantiationLoc,
ArrayRef<Expr *> Args,
AssociatedNamespaceSet &AssociatedNamespaces,
AssociatedClassSet &AssociatedClasses);
void FilterLookupForScope(LookupResult &R, DeclContext *Ctx, Scope *S,
bool ConsiderLinkage, bool AllowInlineNamespace);
bool CheckRedeclarationModuleOwnership(NamedDecl *New, NamedDecl *Old);
void DiagnoseAmbiguousLookup(LookupResult &Result);
//@}
/// Attempts to produce a RecoveryExpr after some AST node cannot be created.
ExprResult CreateRecoveryExpr(SourceLocation Begin, SourceLocation End,
ArrayRef<Expr *> SubExprs,
QualType T = QualType());
ObjCInterfaceDecl *getObjCInterfaceDecl(IdentifierInfo *&Id,
SourceLocation IdLoc,
bool TypoCorrection = false);
FunctionDecl *CreateBuiltin(IdentifierInfo *II, QualType Type, unsigned ID,
SourceLocation Loc);
NamedDecl *LazilyCreateBuiltin(IdentifierInfo *II, unsigned ID,
Scope *S, bool ForRedeclaration,
SourceLocation Loc);
NamedDecl *ImplicitlyDefineFunction(SourceLocation Loc, IdentifierInfo &II,
Scope *S);
void AddKnownFunctionAttributesForReplaceableGlobalAllocationFunction(
FunctionDecl *FD);
void AddKnownFunctionAttributes(FunctionDecl *FD);
// More parsing and symbol table subroutines.
void ProcessPragmaWeak(Scope *S, Decl *D);
// Decl attributes - this routine is the top level dispatcher.
void ProcessDeclAttributes(Scope *S, Decl *D, const Declarator &PD);
// Helper for delayed processing of attributes.
void ProcessDeclAttributeDelayed(Decl *D,
const ParsedAttributesView &AttrList);
void ProcessDeclAttributeList(Scope *S, Decl *D, const ParsedAttributesView &AL,
bool IncludeCXX11Attributes = true);
bool ProcessAccessDeclAttributeList(AccessSpecDecl *ASDecl,
const ParsedAttributesView &AttrList);
void checkUnusedDeclAttributes(Declarator &D);
/// Handles semantic checking for features that are common to all attributes,
/// such as checking whether a parameter was properly specified, or the
/// correct number of arguments were passed, etc. Returns true if the
/// attribute has been diagnosed.
bool checkCommonAttributeFeatures(const Decl *D, const ParsedAttr &A);
bool checkCommonAttributeFeatures(const Stmt *S, const ParsedAttr &A);
/// Determine if type T is a valid subject for nonnull and similar
/// attributes. By default, we look through references (the behavior used by
/// nonnull), but if the second parameter is true, then we treat a reference
/// type as valid.
bool isValidPointerAttrType(QualType T, bool RefOkay = false);
bool CheckRegparmAttr(const ParsedAttr &attr, unsigned &value);
bool CheckCallingConvAttr(const ParsedAttr &attr, CallingConv &CC,
const FunctionDecl *FD = nullptr);
bool CheckAttrTarget(const ParsedAttr &CurrAttr);
bool CheckAttrNoArgs(const ParsedAttr &CurrAttr);
bool checkStringLiteralArgumentAttr(const ParsedAttr &Attr, unsigned ArgNum,
StringRef &Str,
SourceLocation *ArgLocation = nullptr);
llvm::Error isValidSectionSpecifier(StringRef Str);
bool checkSectionName(SourceLocation LiteralLoc, StringRef Str);
bool checkTargetAttr(SourceLocation LiteralLoc, StringRef Str);
bool checkTargetClonesAttrString(SourceLocation LiteralLoc, StringRef Str,
const StringLiteral *Literal,
bool &HasDefault, bool &HasCommas,
SmallVectorImpl<StringRef> &Strings);
bool checkMSInheritanceAttrOnDefinition(
CXXRecordDecl *RD, SourceRange Range, bool BestCase,
MSInheritanceModel SemanticSpelling);
void CheckAlignasUnderalignment(Decl *D);
/// Adjust the calling convention of a method to be the ABI default if it
/// wasn't specified explicitly. This handles method types formed from
/// function type typedefs and typename template arguments.
void adjustMemberFunctionCC(QualType &T, bool IsStatic, bool IsCtorOrDtor,
SourceLocation Loc);
// Check if there is an explicit attribute, but only look through parens.
// The intent is to look for an attribute on the current declarator, but not
// one that came from a typedef.
bool hasExplicitCallingConv(QualType T);
/// Get the outermost AttributedType node that sets a calling convention.
/// Valid types should not have multiple attributes with different CCs.
const AttributedType *getCallingConvAttributedType(QualType T) const;
/// Process the attributes before creating an attributed statement. Returns
/// the semantic attributes that have been processed.
void ProcessStmtAttributes(Stmt *Stmt,
const ParsedAttributesWithRange &InAttrs,
SmallVectorImpl<const Attr *> &OutAttrs);
void WarnConflictingTypedMethods(ObjCMethodDecl *Method,
ObjCMethodDecl *MethodDecl,
bool IsProtocolMethodDecl);
void CheckConflictingOverridingMethod(ObjCMethodDecl *Method,
ObjCMethodDecl *Overridden,
bool IsProtocolMethodDecl);
/// WarnExactTypedMethods - This routine issues a warning if a method
/// implementation declaration exactly matches that of its declaration.
void WarnExactTypedMethods(ObjCMethodDecl *Method,
ObjCMethodDecl *MethodDecl,
bool IsProtocolMethodDecl);
typedef llvm::SmallPtrSet<Selector, 8> SelectorSet;
/// CheckImplementationIvars - This routine checks if the instance variables
/// listed in the implementation match those listed in the interface.
void CheckImplementationIvars(ObjCImplementationDecl *ImpDecl,
ObjCIvarDecl **Fields, unsigned nIvars,
SourceLocation Loc);
/// ImplMethodsVsClassMethods - This is the main routine to warn if any method
/// remains unimplemented in the class or category \@implementation.
void ImplMethodsVsClassMethods(Scope *S, ObjCImplDecl* IMPDecl,
ObjCContainerDecl* IDecl,
bool IncompleteImpl = false);
/// DiagnoseUnimplementedProperties - This routine warns on those properties
/// which must be implemented by this implementation.
void DiagnoseUnimplementedProperties(Scope *S, ObjCImplDecl* IMPDecl,
ObjCContainerDecl *CDecl,
bool SynthesizeProperties);
/// Diagnose any null-resettable synthesized setters.
void diagnoseNullResettableSynthesizedSetters(const ObjCImplDecl *impDecl);
/// DefaultSynthesizeProperties - This routine default synthesizes all
/// properties which must be synthesized in the class's \@implementation.
void DefaultSynthesizeProperties(Scope *S, ObjCImplDecl *IMPDecl,
ObjCInterfaceDecl *IDecl,
SourceLocation AtEnd);
void DefaultSynthesizeProperties(Scope *S, Decl *D, SourceLocation AtEnd);
/// IvarBacksCurrentMethodAccessor - This routine returns 'true' if 'IV' is
/// an ivar synthesized for 'Method' and 'Method' is a property accessor
/// declared in class 'IFace'.
bool IvarBacksCurrentMethodAccessor(ObjCInterfaceDecl *IFace,
ObjCMethodDecl *Method, ObjCIvarDecl *IV);
/// DiagnoseUnusedBackingIvarInAccessor - Issue an 'unused' warning if the
/// ivar which backs the property is not used in the property's accessor.
void DiagnoseUnusedBackingIvarInAccessor(Scope *S,
const ObjCImplementationDecl *ImplD);
/// GetIvarBackingPropertyAccessor - If the method is a property setter/getter
/// and its property has a backing ivar, returns this ivar; otherwise, returns
/// NULL. It also returns the ivar's property on success.
ObjCIvarDecl *GetIvarBackingPropertyAccessor(const ObjCMethodDecl *Method,
const ObjCPropertyDecl *&PDecl) const;
/// Called by ActOnProperty to handle \@property declarations in
/// class extensions.
ObjCPropertyDecl *HandlePropertyInClassExtension(Scope *S,
SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD,
Selector GetterSel,
SourceLocation GetterNameLoc,
Selector SetterSel,
SourceLocation SetterNameLoc,
const bool isReadWrite,
unsigned &Attributes,
const unsigned AttributesAsWritten,
QualType T,
TypeSourceInfo *TSI,
tok::ObjCKeywordKind MethodImplKind);
/// Called by ActOnProperty and HandlePropertyInClassExtension to
/// handle creating the ObjCPropertyDecl for a category or \@interface.
ObjCPropertyDecl *CreatePropertyDecl(Scope *S,
ObjCContainerDecl *CDecl,
SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD,
Selector GetterSel,
SourceLocation GetterNameLoc,
Selector SetterSel,
SourceLocation SetterNameLoc,
const bool isReadWrite,
const unsigned Attributes,
const unsigned AttributesAsWritten,
QualType T,
TypeSourceInfo *TSI,
tok::ObjCKeywordKind MethodImplKind,
DeclContext *lexicalDC = nullptr);
/// AtomicPropertySetterGetterRules - This routine enforces the rule (via
/// warning) when an atomic property has one but not the other user-declared
/// setter or getter.
void AtomicPropertySetterGetterRules(ObjCImplDecl* IMPDecl,
ObjCInterfaceDecl* IDecl);
void DiagnoseOwningPropertyGetterSynthesis(const ObjCImplementationDecl *D);
void DiagnoseMissingDesignatedInitOverrides(
const ObjCImplementationDecl *ImplD,
const ObjCInterfaceDecl *IFD);
void DiagnoseDuplicateIvars(ObjCInterfaceDecl *ID, ObjCInterfaceDecl *SID);
enum MethodMatchStrategy {
MMS_loose,
MMS_strict
};
/// MatchTwoMethodDeclarations - Checks whether two methods' types match and
/// returns true or false accordingly.
bool MatchTwoMethodDeclarations(const ObjCMethodDecl *Method,
const ObjCMethodDecl *PrevMethod,
MethodMatchStrategy strategy = MMS_strict);
/// MatchAllMethodDeclarations - Check methods declared in an interface or
/// protocol against those declared in their implementations.
void MatchAllMethodDeclarations(const SelectorSet &InsMap,
const SelectorSet &ClsMap,
SelectorSet &InsMapSeen,
SelectorSet &ClsMapSeen,
ObjCImplDecl* IMPDecl,
ObjCContainerDecl* IDecl,
bool &IncompleteImpl,
bool ImmediateClass,
bool WarnCategoryMethodImpl=false);
/// CheckCategoryVsClassMethodMatches - Checks that methods implemented in a
/// category match those implemented in its primary class, and warns each time
/// an exact match is found.
void CheckCategoryVsClassMethodMatches(ObjCCategoryImplDecl *CatIMP);
/// Add the given method to the list of globally-known methods.
void addMethodToGlobalList(ObjCMethodList *List, ObjCMethodDecl *Method);
/// Returns the default address space for method qualifiers.
LangAS getDefaultCXXMethodAddrSpace() const;
private:
/// AddMethodToGlobalPool - Add an instance or factory method to the global
/// pool. See the description of AddInstanceMethodToGlobalPool.
void AddMethodToGlobalPool(ObjCMethodDecl *Method, bool impl, bool instance);
/// LookupMethodInGlobalPool - Returns the instance or factory method and
/// optionally warns if there are multiple signatures.
ObjCMethodDecl *LookupMethodInGlobalPool(Selector Sel, SourceRange R,
bool receiverIdOrClass,
bool instance);
public:
/// Returns instance or factory methods in the global method pool for the
/// given selector. It checks the desired kind first; if none is found and the
/// parameter CheckTheOther is set, it then checks the other kind. If no such
/// method or only one method is found, the function returns false; otherwise,
/// it returns true.
bool
CollectMultipleMethodsInGlobalPool(Selector Sel,
SmallVectorImpl<ObjCMethodDecl*>& Methods,
bool InstanceFirst, bool CheckTheOther,
const ObjCObjectType *TypeBound = nullptr);
bool
AreMultipleMethodsInGlobalPool(Selector Sel, ObjCMethodDecl *BestMethod,
SourceRange R, bool receiverIdOrClass,
SmallVectorImpl<ObjCMethodDecl*>& Methods);
void
DiagnoseMultipleMethodInGlobalPool(SmallVectorImpl<ObjCMethodDecl*> &Methods,
Selector Sel, SourceRange R,
bool receiverIdOrClass);
private:
/// Returns the method which best matches the given argument list, or
/// nullptr if none could be found.
ObjCMethodDecl *SelectBestMethod(Selector Sel, MultiExprArg Args,
bool IsInstance,
SmallVectorImpl<ObjCMethodDecl*>& Methods);
/// Record the typo correction failure and return an empty correction.
TypoCorrection FailedCorrection(IdentifierInfo *Typo, SourceLocation TypoLoc,
bool RecordFailure = true) {
if (RecordFailure)
TypoCorrectionFailures[Typo].insert(TypoLoc);
return TypoCorrection();
}
public:
/// AddInstanceMethodToGlobalPool - All instance methods in a translation
/// unit are added to a global pool. This allows us to efficiently associate
/// a selector with a method declaration for purposes of typechecking
/// messages sent to "id" (where the class of the object is unknown).
void AddInstanceMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) {
AddMethodToGlobalPool(Method, impl, /*instance*/true);
}
/// AddFactoryMethodToGlobalPool - Same as above, but for factory methods.
void AddFactoryMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) {
AddMethodToGlobalPool(Method, impl, /*instance*/false);
}
/// AddAnyMethodToGlobalPool - Add any method, instance or factory, to the
/// global pool.
void AddAnyMethodToGlobalPool(Decl *D);
/// LookupInstanceMethodInGlobalPool - Returns the method and warns if
/// there are multiple signatures.
ObjCMethodDecl *LookupInstanceMethodInGlobalPool(Selector Sel, SourceRange R,
bool receiverIdOrClass=false) {
return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
/*instance*/true);
}
/// LookupFactoryMethodInGlobalPool - Returns the method and warns if
/// there are multiple signatures.
ObjCMethodDecl *LookupFactoryMethodInGlobalPool(Selector Sel, SourceRange R,
bool receiverIdOrClass=false) {
return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
/*instance*/false);
}
const ObjCMethodDecl *SelectorsForTypoCorrection(Selector Sel,
QualType ObjectType=QualType());
/// LookupImplementedMethodInGlobalPool - Returns the method which has an
/// implementation.
ObjCMethodDecl *LookupImplementedMethodInGlobalPool(Selector Sel);
/// CollectIvarsToConstructOrDestruct - Collect those ivars which require
/// initialization.
void CollectIvarsToConstructOrDestruct(ObjCInterfaceDecl *OI,
SmallVectorImpl<ObjCIvarDecl*> &Ivars);
//===--------------------------------------------------------------------===//
// Statement Parsing Callbacks: SemaStmt.cpp.
public:
class FullExprArg {
public:
FullExprArg() : E(nullptr) { }
FullExprArg(Sema &actions) : E(nullptr) { }
ExprResult release() {
return E;
}
Expr *get() const { return E; }
Expr *operator->() {
return E;
}
private:
// FIXME: No need to make the entire Sema class a friend when it's just
// Sema::MakeFullExpr that needs access to the constructor below.
friend class Sema;
explicit FullExprArg(Expr *expr) : E(expr) {}
Expr *E;
};
FullExprArg MakeFullExpr(Expr *Arg) {
return MakeFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation());
}
FullExprArg MakeFullExpr(Expr *Arg, SourceLocation CC) {
return FullExprArg(
ActOnFinishFullExpr(Arg, CC, /*DiscardedValue*/ false).get());
}
FullExprArg MakeFullDiscardedValueExpr(Expr *Arg) {
ExprResult FE =
ActOnFinishFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation(),
/*DiscardedValue*/ true);
return FullExprArg(FE.get());
}
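// A hedged usage sketch for MakeFullExpr (caller-side names are
// illustrative): the third part of a for-statement is wrapped as a full
// expression before being handed to ActOnForStmt, e.g.
//   S.ActOnForStmt(ForLoc, LParenLoc, InitStmt, CondResult,
//                  S.MakeFullExpr(IncExpr), RParenLoc, Body);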
StmtResult ActOnExprStmt(ExprResult Arg, bool DiscardedValue = true);
StmtResult ActOnExprStmtError();
StmtResult ActOnNullStmt(SourceLocation SemiLoc,
bool HasLeadingEmptyMacro = false);
void ActOnStartOfCompoundStmt(bool IsStmtExpr);
void ActOnAfterCompoundStatementLeadingPragmas();
void ActOnFinishOfCompoundStmt();
StmtResult ActOnCompoundStmt(SourceLocation L, SourceLocation R,
ArrayRef<Stmt *> Elts, bool isStmtExpr);
/// An RAII object to enter the scope of a compound statement.
class CompoundScopeRAII {
public:
CompoundScopeRAII(Sema &S, bool IsStmtExpr = false) : S(S) {
S.ActOnStartOfCompoundStmt(IsStmtExpr);
}
~CompoundScopeRAII() {
S.ActOnFinishOfCompoundStmt();
}
private:
Sema &S;
};
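// Minimal usage sketch for CompoundScopeRAII (hypothetical caller):
//   {
//     Sema::CompoundScopeRAII CompoundScope(SemaRef);
//     // ... act on the statements of the compound statement ...
//   } // ActOnFinishOfCompoundStmt() runs here.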
/// An RAII helper that pops a function scope on exit.
struct FunctionScopeRAII {
Sema &S;
bool Active;
FunctionScopeRAII(Sema &S) : S(S), Active(true) {}
~FunctionScopeRAII() {
if (Active)
S.PopFunctionScopeInfo();
}
void disable() { Active = false; }
};
StmtResult ActOnDeclStmt(DeclGroupPtrTy Decl,
SourceLocation StartLoc,
SourceLocation EndLoc);
void ActOnForEachDeclStmt(DeclGroupPtrTy Decl);
StmtResult ActOnForEachLValueExpr(Expr *E);
ExprResult ActOnCaseExpr(SourceLocation CaseLoc, ExprResult Val);
StmtResult ActOnCaseStmt(SourceLocation CaseLoc, ExprResult LHS,
SourceLocation DotDotDotLoc, ExprResult RHS,
SourceLocation ColonLoc);
void ActOnCaseStmtBody(Stmt *CaseStmt, Stmt *SubStmt);
StmtResult ActOnDefaultStmt(SourceLocation DefaultLoc,
SourceLocation ColonLoc,
Stmt *SubStmt, Scope *CurScope);
StmtResult ActOnLabelStmt(SourceLocation IdentLoc, LabelDecl *TheDecl,
SourceLocation ColonLoc, Stmt *SubStmt);
StmtResult BuildAttributedStmt(SourceLocation AttrsLoc,
ArrayRef<const Attr *> Attrs, Stmt *SubStmt);
StmtResult ActOnAttributedStmt(const ParsedAttributesWithRange &AttrList,
Stmt *SubStmt);
bool CheckRebuiltAttributedStmtAttributes(ArrayRef<const Attr *> Attrs);
class ConditionResult;
StmtResult ActOnIfStmt(SourceLocation IfLoc, IfStatementKind StatementKind,
SourceLocation LParenLoc, Stmt *InitStmt,
ConditionResult Cond, SourceLocation RParenLoc,
Stmt *ThenVal, SourceLocation ElseLoc, Stmt *ElseVal);
StmtResult BuildIfStmt(SourceLocation IfLoc, IfStatementKind StatementKind,
SourceLocation LParenLoc, Stmt *InitStmt,
ConditionResult Cond, SourceLocation RParenLoc,
Stmt *ThenVal, SourceLocation ElseLoc, Stmt *ElseVal);
StmtResult ActOnStartOfSwitchStmt(SourceLocation SwitchLoc,
SourceLocation LParenLoc, Stmt *InitStmt,
ConditionResult Cond,
SourceLocation RParenLoc);
StmtResult ActOnFinishSwitchStmt(SourceLocation SwitchLoc,
Stmt *Switch, Stmt *Body);
StmtResult ActOnWhileStmt(SourceLocation WhileLoc, SourceLocation LParenLoc,
ConditionResult Cond, SourceLocation RParenLoc,
Stmt *Body);
StmtResult ActOnDoStmt(SourceLocation DoLoc, Stmt *Body,
SourceLocation WhileLoc, SourceLocation CondLParen,
Expr *Cond, SourceLocation CondRParen);
StmtResult ActOnForStmt(SourceLocation ForLoc,
SourceLocation LParenLoc,
Stmt *First,
ConditionResult Second,
FullExprArg Third,
SourceLocation RParenLoc,
Stmt *Body);
ExprResult CheckObjCForCollectionOperand(SourceLocation forLoc,
Expr *collection);
StmtResult ActOnObjCForCollectionStmt(SourceLocation ForColLoc,
Stmt *First, Expr *collection,
SourceLocation RParenLoc);
StmtResult FinishObjCForCollectionStmt(Stmt *ForCollection, Stmt *Body);
enum BuildForRangeKind {
/// Initial building of a for-range statement.
BFRK_Build,
/// Instantiation or recovery rebuild of a for-range statement. Don't
/// attempt any typo-correction.
BFRK_Rebuild,
/// Determining whether a for-range statement could be built. Avoid any
/// unnecessary or irreversible actions.
BFRK_Check
};
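// For orientation (standard C++ semantics, not specific to this interface),
// `for (auto x : r) body;` is built roughly as:
//   auto &&__range = r;
//   auto __begin = begin-expr, __end = end-expr;
//   for (; __begin != __end; ++__begin) { auto x = *__begin; body; }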
StmtResult ActOnCXXForRangeStmt(Scope *S, SourceLocation ForLoc,
SourceLocation CoawaitLoc,
Stmt *InitStmt,
Stmt *LoopVar,
SourceLocation ColonLoc, Expr *Collection,
SourceLocation RParenLoc,
BuildForRangeKind Kind);
StmtResult BuildCXXForRangeStmt(SourceLocation ForLoc,
SourceLocation CoawaitLoc,
Stmt *InitStmt,
SourceLocation ColonLoc,
Stmt *RangeDecl, Stmt *Begin, Stmt *End,
Expr *Cond, Expr *Inc,
Stmt *LoopVarDecl,
SourceLocation RParenLoc,
BuildForRangeKind Kind);
StmtResult FinishCXXForRangeStmt(Stmt *ForRange, Stmt *Body);
StmtResult ActOnGotoStmt(SourceLocation GotoLoc,
SourceLocation LabelLoc,
LabelDecl *TheDecl);
StmtResult ActOnIndirectGotoStmt(SourceLocation GotoLoc,
SourceLocation StarLoc,
Expr *DestExp);
StmtResult ActOnContinueStmt(SourceLocation ContinueLoc, Scope *CurScope);
StmtResult ActOnBreakStmt(SourceLocation BreakLoc, Scope *CurScope);
void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope,
CapturedRegionKind Kind, unsigned NumParams);
typedef std::pair<StringRef, QualType> CapturedParamNameType;
void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope,
CapturedRegionKind Kind,
ArrayRef<CapturedParamNameType> Params,
unsigned OpenMPCaptureLevel = 0);
StmtResult ActOnCapturedRegionEnd(Stmt *S);
void ActOnCapturedRegionError();
RecordDecl *CreateCapturedStmtRecordDecl(CapturedDecl *&CD,
SourceLocation Loc,
unsigned NumParams);
struct NamedReturnInfo {
const VarDecl *Candidate;
enum Status : uint8_t { None, MoveEligible, MoveEligibleAndCopyElidable };
Status S;
bool isMoveEligible() const { return S != None; }
bool isCopyElidable() const { return S == MoveEligibleAndCopyElidable; }
};
enum class SimplerImplicitMoveMode { ForceOff, Normal, ForceOn };
NamedReturnInfo getNamedReturnInfo(
Expr *&E, SimplerImplicitMoveMode Mode = SimplerImplicitMoveMode::Normal);
NamedReturnInfo getNamedReturnInfo(const VarDecl *VD);
const VarDecl *getCopyElisionCandidate(NamedReturnInfo &Info,
QualType ReturnType);
ExprResult
PerformMoveOrCopyInitialization(const InitializedEntity &Entity,
const NamedReturnInfo &NRInfo, Expr *Value,
bool SuppressSimplerImplicitMoves = false);
StmtResult ActOnReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp,
Scope *CurScope);
StmtResult BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp);
StmtResult ActOnCapScopeReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp,
NamedReturnInfo &NRInfo,
bool SuppressSimplerImplicitMoves);
StmtResult ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple,
bool IsVolatile, unsigned NumOutputs,
unsigned NumInputs, IdentifierInfo **Names,
MultiExprArg Constraints, MultiExprArg Exprs,
Expr *AsmString, MultiExprArg Clobbers,
unsigned NumLabels,
SourceLocation RParenLoc);
void FillInlineAsmIdentifierInfo(Expr *Res,
llvm::InlineAsmIdentifierInfo &Info);
ExprResult LookupInlineAsmIdentifier(CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
UnqualifiedId &Id,
bool IsUnevaluatedContext);
bool LookupInlineAsmField(StringRef Base, StringRef Member,
unsigned &Offset, SourceLocation AsmLoc);
ExprResult LookupInlineAsmVarDeclField(Expr *RefExpr, StringRef Member,
SourceLocation AsmLoc);
StmtResult ActOnMSAsmStmt(SourceLocation AsmLoc, SourceLocation LBraceLoc,
ArrayRef<Token> AsmToks,
StringRef AsmString,
unsigned NumOutputs, unsigned NumInputs,
ArrayRef<StringRef> Constraints,
ArrayRef<StringRef> Clobbers,
ArrayRef<Expr*> Exprs,
SourceLocation EndLoc);
LabelDecl *GetOrCreateMSAsmLabel(StringRef ExternalLabelName,
SourceLocation Location,
bool AlwaysCreate);
VarDecl *BuildObjCExceptionDecl(TypeSourceInfo *TInfo, QualType ExceptionType,
SourceLocation StartLoc,
SourceLocation IdLoc, IdentifierInfo *Id,
bool Invalid = false);
Decl *ActOnObjCExceptionDecl(Scope *S, Declarator &D);
StmtResult ActOnObjCAtCatchStmt(SourceLocation AtLoc, SourceLocation RParen,
Decl *Parm, Stmt *Body);
StmtResult ActOnObjCAtFinallyStmt(SourceLocation AtLoc, Stmt *Body);
StmtResult ActOnObjCAtTryStmt(SourceLocation AtLoc, Stmt *Try,
MultiStmtArg Catch, Stmt *Finally);
StmtResult BuildObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw);
StmtResult ActOnObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw,
Scope *CurScope);
ExprResult ActOnObjCAtSynchronizedOperand(SourceLocation atLoc,
Expr *operand);
StmtResult ActOnObjCAtSynchronizedStmt(SourceLocation AtLoc,
Expr *SynchExpr,
Stmt *SynchBody);
StmtResult ActOnObjCAutoreleasePoolStmt(SourceLocation AtLoc, Stmt *Body);
VarDecl *BuildExceptionDeclaration(Scope *S, TypeSourceInfo *TInfo,
SourceLocation StartLoc,
SourceLocation IdLoc,
IdentifierInfo *Id);
Decl *ActOnExceptionDeclarator(Scope *S, Declarator &D);
StmtResult ActOnCXXCatchBlock(SourceLocation CatchLoc,
Decl *ExDecl, Stmt *HandlerBlock);
StmtResult ActOnCXXTryBlock(SourceLocation TryLoc, Stmt *TryBlock,
ArrayRef<Stmt *> Handlers);
StmtResult ActOnSEHTryBlock(bool IsCXXTry, // try (true) or __try (false) ?
SourceLocation TryLoc, Stmt *TryBlock,
Stmt *Handler);
StmtResult ActOnSEHExceptBlock(SourceLocation Loc,
Expr *FilterExpr,
Stmt *Block);
void ActOnStartSEHFinallyBlock();
void ActOnAbortSEHFinallyBlock();
StmtResult ActOnFinishSEHFinallyBlock(SourceLocation Loc, Stmt *Block);
StmtResult ActOnSEHLeaveStmt(SourceLocation Loc, Scope *CurScope);
void DiagnoseReturnInConstructorExceptionHandler(CXXTryStmt *TryBlock);
bool ShouldWarnIfUnusedFileScopedDecl(const DeclaratorDecl *D) const;
/// If it's a file scoped decl that must warn if not used, keep track
/// of it.
void MarkUnusedFileScopedDecl(const DeclaratorDecl *D);
/// DiagnoseUnusedExprResult - If the statement passed in is an expression
/// whose result is unused, warn.
void DiagnoseUnusedExprResult(const Stmt *S, unsigned DiagID);
void DiagnoseUnusedNestedTypedefs(const RecordDecl *D);
void DiagnoseUnusedDecl(const NamedDecl *ND);
/// If VD is a parameter or a variable that is set but not otherwise used,
/// diagnose.
void DiagnoseUnusedButSetDecl(const VarDecl *VD);
/// Emit \p DiagID if the statement located at \p StmtLoc has a suspicious
/// null statement as its \p Body, and it is located on the same line.
///
/// This helps prevent bugs due to typos, such as:
/// if (condition);
/// do_stuff();
void DiagnoseEmptyStmtBody(SourceLocation StmtLoc,
const Stmt *Body,
unsigned DiagID);
/// Warn if a for/while loop statement \p S, which is followed by
/// \p PossibleBody, has a suspicious null statement as a body.
void DiagnoseEmptyLoopBody(const Stmt *S,
const Stmt *PossibleBody);
/// Warn if a value is moved to itself.
void DiagnoseSelfMove(const Expr *LHSExpr, const Expr *RHSExpr,
SourceLocation OpLoc);
/// Warn if we're implicitly casting from a _Nullable pointer type to a
/// _Nonnull one.
void diagnoseNullableToNonnullConversion(QualType DstType, QualType SrcType,
SourceLocation Loc);
/// Warn when implicitly casting 0 to nullptr.
void diagnoseZeroToNullptrConversion(CastKind Kind, const Expr *E);
ParsingDeclState PushParsingDeclaration(sema::DelayedDiagnosticPool &pool) {
return DelayedDiagnostics.push(pool);
}
void PopParsingDeclaration(ParsingDeclState state, Decl *decl);
typedef ProcessingContextState ParsingClassState;
ParsingClassState PushParsingClass() {
ParsingClassDepth++;
return DelayedDiagnostics.pushUndelayed();
}
void PopParsingClass(ParsingClassState state) {
ParsingClassDepth--;
DelayedDiagnostics.popUndelayed(state);
}
void redelayDiagnostics(sema::DelayedDiagnosticPool &pool);
void DiagnoseAvailabilityOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs,
const ObjCInterfaceDecl *UnknownObjCClass,
bool ObjCPropertyAccess,
bool AvoidPartialAvailabilityChecks = false,
ObjCInterfaceDecl *ClassReceiver = nullptr);
bool makeUnavailableInSystemHeader(SourceLocation loc,
UnavailableAttr::ImplicitReason reason);
/// Issue any -Wunguarded-availability warnings in \c FD
void DiagnoseUnguardedAvailabilityViolations(Decl *FD);
void handleDelayedAvailabilityCheck(sema::DelayedDiagnostic &DD, Decl *Ctx);
//===--------------------------------------------------------------------===//
// Expression Parsing Callbacks: SemaExpr.cpp.
bool CanUseDecl(NamedDecl *D, bool TreatUnavailableAsInvalid);
bool DiagnoseUseOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs,
const ObjCInterfaceDecl *UnknownObjCClass = nullptr,
bool ObjCPropertyAccess = false,
bool AvoidPartialAvailabilityChecks = false,
ObjCInterfaceDecl *ClassReceiver = nullptr);
void NoteDeletedFunction(FunctionDecl *FD);
void NoteDeletedInheritingConstructor(CXXConstructorDecl *CD);
bool DiagnosePropertyAccessorMismatch(ObjCPropertyDecl *PD,
ObjCMethodDecl *Getter,
SourceLocation Loc);
void DiagnoseSentinelCalls(NamedDecl *D, SourceLocation Loc,
ArrayRef<Expr *> Args);
void PushExpressionEvaluationContext(
ExpressionEvaluationContext NewContext, Decl *LambdaContextDecl = nullptr,
ExpressionEvaluationContextRecord::ExpressionKind Type =
ExpressionEvaluationContextRecord::EK_Other);
enum ReuseLambdaContextDecl_t { ReuseLambdaContextDecl };
void PushExpressionEvaluationContext(
ExpressionEvaluationContext NewContext, ReuseLambdaContextDecl_t,
ExpressionEvaluationContextRecord::ExpressionKind Type =
ExpressionEvaluationContextRecord::EK_Other);
void PopExpressionEvaluationContext();
void DiscardCleanupsInEvaluationContext();
ExprResult TransformToPotentiallyEvaluated(Expr *E);
ExprResult HandleExprEvaluationContextForTypeof(Expr *E);
ExprResult CheckUnevaluatedOperand(Expr *E);
void CheckUnusedVolatileAssignment(Expr *E);
ExprResult ActOnConstantExpression(ExprResult Res);
// Functions for marking a declaration referenced. These functions also
// contain the relevant logic for marking if a reference to a function or
// variable is an odr-use (in the C++11 sense). There are separate variants
// for expressions referring to a decl; these exist because odr-use marking
// needs to be delayed for some constant variables when we build one of the
// named expressions.
//
// MightBeOdrUse indicates whether the use could possibly be an odr-use, and
// should usually be true. This only needs to be set to false if the lack of
// odr-use cannot be determined from the current context (for instance,
// because the name denotes a virtual function and was written without an
// explicit nested-name-specifier).
void MarkAnyDeclReferenced(SourceLocation Loc, Decl *D, bool MightBeOdrUse);
void MarkFunctionReferenced(SourceLocation Loc, FunctionDecl *Func,
bool MightBeOdrUse = true);
void MarkVariableReferenced(SourceLocation Loc, VarDecl *Var);
void MarkDeclRefReferenced(DeclRefExpr *E, const Expr *Base = nullptr);
void MarkMemberReferenced(MemberExpr *E);
void MarkFunctionParmPackReferenced(FunctionParmPackExpr *E);
void MarkCaptureUsedInEnclosingContext(VarDecl *Capture, SourceLocation Loc,
unsigned CapturingScopeIndex);
ExprResult CheckLValueToRValueConversionOperand(Expr *E);
void CleanupVarDeclMarking();
enum TryCaptureKind {
TryCapture_Implicit, TryCapture_ExplicitByVal, TryCapture_ExplicitByRef
};
/// Try to capture the given variable.
///
/// \param Var The variable to capture.
///
/// \param Loc The location at which the capture occurs.
///
/// \param Kind The kind of capture, which may be implicit (for either a
/// block or a lambda), or explicit by-value or by-reference (for a lambda).
///
/// \param EllipsisLoc The location of the ellipsis, if one is provided in
/// an explicit lambda capture.
///
/// \param BuildAndDiagnose Whether we are actually supposed to add the
/// captures or diagnose errors. If false, this routine merely checks whether
/// the capture can occur, without performing the capture itself or complaining
/// if the variable cannot be captured.
///
/// \param CaptureType Will be set to the type of the field used to capture
/// this variable in the innermost block or lambda. Only valid when the
/// variable can be captured.
///
/// \param DeclRefType Will be set to the type of a reference to the capture
/// from within the current scope. Only valid when the variable can be
/// captured.
///
/// \param FunctionScopeIndexToStopAt If non-null, it points to the index
/// of the FunctionScopeInfo stack beyond which we do not attempt to capture.
/// This is useful when enclosing lambdas must speculatively capture
/// variables that may or may not be used in certain specializations of
/// a nested generic lambda.
///
/// \returns true if an error occurred (i.e., the variable cannot be
/// captured) and false if the capture succeeded.
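///
/// A query-only sketch (no capture is performed; caller names are
/// illustrative):
/// \code
///   QualType CaptureType, DeclRefType;
///   if (!tryCaptureVariable(Var, Loc, TryCapture_Implicit, SourceLocation(),
///                           /*BuildAndDiagnose=*/false, CaptureType,
///                           DeclRefType, nullptr)) {
///     // Var can be captured; CaptureType and DeclRefType are now valid.
///   }
/// \endcode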
bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind,
SourceLocation EllipsisLoc, bool BuildAndDiagnose,
QualType &CaptureType,
QualType &DeclRefType,
const unsigned *const FunctionScopeIndexToStopAt);
/// Try to capture the given variable.
bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc,
TryCaptureKind Kind = TryCapture_Implicit,
SourceLocation EllipsisLoc = SourceLocation());
/// Checks if the variable must be captured.
bool NeedToCaptureVariable(VarDecl *Var, SourceLocation Loc);
/// Given a variable, determine the type that a reference to that
/// variable will have in the given scope.
QualType getCapturedDeclRefType(VarDecl *Var, SourceLocation Loc);
/// Mark all of the declarations referenced within a particular AST node as
/// referenced. Used when template instantiation instantiates a non-dependent
/// type -- entities referenced by the type are now referenced.
void MarkDeclarationsReferencedInType(SourceLocation Loc, QualType T);
void MarkDeclarationsReferencedInExpr(Expr *E,
bool SkipLocalVariables = false,
ArrayRef<const Expr *> StopAt = None);
/// Try to recover by turning the given expression into a
/// call. Returns true if recovery was attempted or an error was
/// emitted; this may also leave the ExprResult invalid.
bool tryToRecoverWithCall(ExprResult &E, const PartialDiagnostic &PD,
bool ForceComplain = false,
bool (*IsPlausibleResult)(QualType) = nullptr);
/// Figure out if an expression could be turned into a call.
bool tryExprAsCall(Expr &E, QualType &ZeroArgCallReturnTy,
UnresolvedSetImpl &NonTemplateOverloads);
/// Try to convert an expression \p E to type \p Ty. Returns the result of the
/// conversion.
ExprResult tryConvertExprToType(Expr *E, QualType Ty);
/// Conditionally issue a diagnostic based on the statements' reachability
/// analysis.
///
/// \param Stmts If Stmts is non-empty, delay reporting the diagnostic until
/// the function body is parsed, and then do a basic reachability analysis to
/// determine if the statement is reachable. If it is unreachable, the
/// diagnostic will not be emitted.
bool DiagIfReachable(SourceLocation Loc, ArrayRef<const Stmt *> Stmts,
const PartialDiagnostic &PD);
/// Conditionally issue a diagnostic based on the current
/// evaluation context.
///
/// \param Statement If Statement is non-null, delay reporting the
/// diagnostic until the function body is parsed, and then do a basic
/// reachability analysis to determine if the statement is reachable.
/// If it is unreachable, the diagnostic will not be emitted.
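///
/// A hedged sketch of a typical call (the diagnostic ID is illustrative):
/// \code
///   DiagRuntimeBehavior(CallLoc, TheCall,
///                       PDiag(diag::warn_null_arg)
///                           << ArgExpr->getSourceRange());
/// \endcode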
bool DiagRuntimeBehavior(SourceLocation Loc, const Stmt *Statement,
const PartialDiagnostic &PD);
/// Similar, but diagnostic is only produced if all the specified statements
/// are reachable.
bool DiagRuntimeBehavior(SourceLocation Loc, ArrayRef<const Stmt*> Stmts,
const PartialDiagnostic &PD);
// Primary Expressions.
SourceRange getExprRange(Expr *E) const;
ExprResult ActOnIdExpression(
Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
UnqualifiedId &Id, bool HasTrailingLParen, bool IsAddressOfOperand,
CorrectionCandidateCallback *CCC = nullptr,
bool IsInlineAsmIdentifier = false, Token *KeywordReplacement = nullptr);
void DecomposeUnqualifiedId(const UnqualifiedId &Id,
TemplateArgumentListInfo &Buffer,
DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *&TemplateArgs);
bool DiagnoseDependentMemberLookup(LookupResult &R);
bool
DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R,
CorrectionCandidateCallback &CCC,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr,
ArrayRef<Expr *> Args = None, TypoExpr **Out = nullptr);
DeclResult LookupIvarInObjCMethod(LookupResult &Lookup, Scope *S,
IdentifierInfo *II);
ExprResult BuildIvarRefExpr(Scope *S, SourceLocation Loc, ObjCIvarDecl *IV);
ExprResult LookupInObjCMethod(LookupResult &LookUp, Scope *S,
IdentifierInfo *II,
bool AllowBuiltinCreation=false);
ExprResult ActOnDependentIdExpression(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
bool isAddressOfOperand,
const TemplateArgumentListInfo *TemplateArgs);
/// If \p D cannot be odr-used in the current expression evaluation context,
/// return a reason explaining why. Otherwise, return NOUR_None.
NonOdrUseReason getNonOdrUseReasonInCurrentContext(ValueDecl *D);
DeclRefExpr *BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
SourceLocation Loc,
const CXXScopeSpec *SS = nullptr);
DeclRefExpr *
BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
const DeclarationNameInfo &NameInfo,
const CXXScopeSpec *SS = nullptr,
NamedDecl *FoundD = nullptr,
SourceLocation TemplateKWLoc = SourceLocation(),
const TemplateArgumentListInfo *TemplateArgs = nullptr);
DeclRefExpr *
BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
const DeclarationNameInfo &NameInfo,
NestedNameSpecifierLoc NNS,
NamedDecl *FoundD = nullptr,
SourceLocation TemplateKWLoc = SourceLocation(),
const TemplateArgumentListInfo *TemplateArgs = nullptr);
ExprResult
BuildAnonymousStructUnionMemberReference(
const CXXScopeSpec &SS,
SourceLocation nameLoc,
IndirectFieldDecl *indirectField,
DeclAccessPair FoundDecl = DeclAccessPair::make(nullptr, AS_none),
Expr *baseObjectExpr = nullptr,
SourceLocation opLoc = SourceLocation());
ExprResult BuildPossibleImplicitMemberExpr(
const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs, const Scope *S,
UnresolvedLookupExpr *AsULE = nullptr);
ExprResult BuildImplicitMemberExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs,
bool IsDefiniteInstance,
const Scope *S);
bool UseArgumentDependentLookup(const CXXScopeSpec &SS,
const LookupResult &R,
bool HasTrailingLParen);
ExprResult
BuildQualifiedDeclarationNameExpr(CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
bool IsAddressOfOperand, const Scope *S,
TypeSourceInfo **RecoveryTSI = nullptr);
ExprResult BuildDependentDeclRefExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult BuildDeclarationNameExpr(const CXXScopeSpec &SS,
LookupResult &R,
bool NeedsADL,
bool AcceptInvalidDecl = false);
ExprResult BuildDeclarationNameExpr(
const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, NamedDecl *D,
NamedDecl *FoundD = nullptr,
const TemplateArgumentListInfo *TemplateArgs = nullptr,
bool AcceptInvalidDecl = false);
ExprResult BuildLiteralOperatorCall(LookupResult &R,
DeclarationNameInfo &SuffixInfo,
ArrayRef<Expr *> Args,
SourceLocation LitEndLoc,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr);
ExprResult BuildPredefinedExpr(SourceLocation Loc,
PredefinedExpr::IdentKind IK);
ExprResult ActOnPredefinedExpr(SourceLocation Loc, tok::TokenKind Kind);
ExprResult ActOnIntegerConstant(SourceLocation Loc, uint64_t Val);
ExprResult BuildSYCLUniqueStableNameExpr(SourceLocation OpLoc,
SourceLocation LParen,
SourceLocation RParen,
TypeSourceInfo *TSI);
ExprResult ActOnSYCLUniqueStableNameExpr(SourceLocation OpLoc,
SourceLocation LParen,
SourceLocation RParen,
ParsedType ParsedTy);
ExprResult BuildSYCLUniqueStableIdExpr(SourceLocation OpLoc,
SourceLocation LParen,
SourceLocation RParen, Expr *E);
ExprResult ActOnSYCLUniqueStableIdExpr(SourceLocation OpLoc,
SourceLocation LParen,
SourceLocation RParen, Expr *E);
bool CheckLoopHintExpr(Expr *E, SourceLocation Loc);
ExprResult ActOnNumericConstant(const Token &Tok, Scope *UDLScope = nullptr);
ExprResult ActOnCharacterConstant(const Token &Tok,
Scope *UDLScope = nullptr);
ExprResult ActOnParenExpr(SourceLocation L, SourceLocation R, Expr *E);
ExprResult ActOnParenListExpr(SourceLocation L,
SourceLocation R,
MultiExprArg Val);
/// ActOnStringLiteral - The specified tokens were lexed as pasted string
/// fragments (e.g. "foo" "bar" L"baz").
ExprResult ActOnStringLiteral(ArrayRef<Token> StringToks,
Scope *UDLScope = nullptr);
ExprResult ActOnGenericSelectionExpr(SourceLocation KeyLoc,
SourceLocation DefaultLoc,
SourceLocation RParenLoc,
Expr *ControllingExpr,
ArrayRef<ParsedType> ArgTypes,
ArrayRef<Expr *> ArgExprs);
ExprResult CreateGenericSelectionExpr(SourceLocation KeyLoc,
SourceLocation DefaultLoc,
SourceLocation RParenLoc,
Expr *ControllingExpr,
ArrayRef<TypeSourceInfo *> Types,
ArrayRef<Expr *> Exprs);
// Binary/Unary Operators. 'Tok' is the token for the operator.
ExprResult CreateBuiltinUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc,
Expr *InputExpr);
ExprResult BuildUnaryOp(Scope *S, SourceLocation OpLoc,
UnaryOperatorKind Opc, Expr *Input);
ExprResult ActOnUnaryOp(Scope *S, SourceLocation OpLoc,
tok::TokenKind Op, Expr *Input);
bool isQualifiedMemberAccess(Expr *E);
QualType CheckAddressOfOperand(ExprResult &Operand, SourceLocation OpLoc);
ExprResult CreateUnaryExprOrTypeTraitExpr(TypeSourceInfo *TInfo,
SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind,
SourceRange R);
ExprResult CreateUnaryExprOrTypeTraitExpr(Expr *E, SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind);
ExprResult
ActOnUnaryExprOrTypeTraitExpr(SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind,
bool IsType, void *TyOrEx,
SourceRange ArgRange);
ExprResult CheckPlaceholderExpr(Expr *E);
bool CheckVecStepExpr(Expr *E);
bool CheckUnaryExprOrTypeTraitOperand(Expr *E, UnaryExprOrTypeTrait ExprKind);
bool CheckUnaryExprOrTypeTraitOperand(QualType ExprType, SourceLocation OpLoc,
SourceRange ExprRange,
UnaryExprOrTypeTrait ExprKind);
ExprResult ActOnSizeofParameterPackExpr(Scope *S,
SourceLocation OpLoc,
IdentifierInfo &Name,
SourceLocation NameLoc,
SourceLocation RParenLoc);
ExprResult ActOnPostfixUnaryOp(Scope *S, SourceLocation OpLoc,
tok::TokenKind Kind, Expr *Input);
ExprResult ActOnArraySubscriptExpr(Scope *S, Expr *Base, SourceLocation LLoc,
Expr *Idx, SourceLocation RLoc);
ExprResult CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc,
Expr *Idx, SourceLocation RLoc);
ExprResult CreateBuiltinMatrixSubscriptExpr(Expr *Base, Expr *RowIdx,
Expr *ColumnIdx,
SourceLocation RBLoc);
ExprResult ActOnOMPArraySectionExpr(Expr *Base, SourceLocation LBLoc,
Expr *LowerBound,
SourceLocation ColonLocFirst,
SourceLocation ColonLocSecond,
Expr *Length, Expr *Stride,
SourceLocation RBLoc);
ExprResult ActOnOMPArrayShapingExpr(Expr *Base, SourceLocation LParenLoc,
SourceLocation RParenLoc,
ArrayRef<Expr *> Dims,
ArrayRef<SourceRange> Brackets);
/// Data structure for iterator expression.
struct OMPIteratorData {
IdentifierInfo *DeclIdent = nullptr;
SourceLocation DeclIdentLoc;
ParsedType Type;
OMPIteratorExpr::IteratorRange Range;
SourceLocation AssignLoc;
SourceLocation ColonLoc;
SourceLocation SecColonLoc;
};
ExprResult ActOnOMPIteratorExpr(Scope *S, SourceLocation IteratorKwLoc,
SourceLocation LLoc, SourceLocation RLoc,
ArrayRef<OMPIteratorData> Data);
// This struct is for use by ActOnMemberAccess to allow
// BuildMemberReferenceExpr to reinvoke ActOnMemberAccess after
// changing the access operator from a '.' to a '->' (to see if that is the
// change needed to fix an error about an unknown member, e.g. when the class
// defines a custom operator->).
struct ActOnMemberAccessExtraArgs {
Scope *S;
UnqualifiedId &Id;
Decl *ObjCImpDecl;
};
ExprResult BuildMemberReferenceExpr(
Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow,
CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs,
const Scope *S,
ActOnMemberAccessExtraArgs *ExtraArgs = nullptr);
ExprResult
BuildMemberReferenceExpr(Expr *Base, QualType BaseType, SourceLocation OpLoc,
bool IsArrow, const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope, LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs,
const Scope *S,
bool SuppressQualifierCheck = false,
ActOnMemberAccessExtraArgs *ExtraArgs = nullptr);
ExprResult BuildFieldReferenceExpr(Expr *BaseExpr, bool IsArrow,
SourceLocation OpLoc,
const CXXScopeSpec &SS, FieldDecl *Field,
DeclAccessPair FoundDecl,
const DeclarationNameInfo &MemberNameInfo);
ExprResult PerformMemberExprBaseConversion(Expr *Base, bool IsArrow);
bool CheckQualifiedMemberReference(Expr *BaseExpr, QualType BaseType,
const CXXScopeSpec &SS,
const LookupResult &R);
ExprResult ActOnDependentMemberExpr(Expr *Base, QualType BaseType,
bool IsArrow, SourceLocation OpLoc,
const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult ActOnMemberAccessExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
UnqualifiedId &Member,
Decl *ObjCImpDecl);
MemberExpr *
BuildMemberExpr(Expr *Base, bool IsArrow, SourceLocation OpLoc,
const CXXScopeSpec *SS, SourceLocation TemplateKWLoc,
ValueDecl *Member, DeclAccessPair FoundDecl,
bool HadMultipleCandidates,
const DeclarationNameInfo &MemberNameInfo, QualType Ty,
ExprValueKind VK, ExprObjectKind OK,
const TemplateArgumentListInfo *TemplateArgs = nullptr);
MemberExpr *
BuildMemberExpr(Expr *Base, bool IsArrow, SourceLocation OpLoc,
NestedNameSpecifierLoc NNS, SourceLocation TemplateKWLoc,
ValueDecl *Member, DeclAccessPair FoundDecl,
bool HadMultipleCandidates,
const DeclarationNameInfo &MemberNameInfo, QualType Ty,
ExprValueKind VK, ExprObjectKind OK,
const TemplateArgumentListInfo *TemplateArgs = nullptr);
void ActOnDefaultCtorInitializers(Decl *CDtorDecl);
bool ConvertArgumentsForCall(CallExpr *Call, Expr *Fn,
FunctionDecl *FDecl,
const FunctionProtoType *Proto,
ArrayRef<Expr *> Args,
SourceLocation RParenLoc,
bool ExecConfig = false);
void CheckStaticArrayArgument(SourceLocation CallLoc,
ParmVarDecl *Param,
const Expr *ArgExpr);
/// ActOnCallExpr - Handle a call to Fn with the specified array of arguments.
/// This provides the location of the left/right parens and a list of comma
/// locations.
ExprResult ActOnCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
MultiExprArg ArgExprs, SourceLocation RParenLoc,
Expr *ExecConfig = nullptr);
ExprResult BuildCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
MultiExprArg ArgExprs, SourceLocation RParenLoc,
Expr *ExecConfig = nullptr,
bool IsExecConfig = false,
bool AllowRecovery = false);
Expr *BuildBuiltinCallExpr(SourceLocation Loc, Builtin::ID Id,
MultiExprArg CallArgs);
enum class AtomicArgumentOrder { API, AST };
ExprResult
BuildAtomicExpr(SourceRange CallRange, SourceRange ExprRange,
SourceLocation RParenLoc, MultiExprArg Args,
AtomicExpr::AtomicOp Op,
AtomicArgumentOrder ArgOrder = AtomicArgumentOrder::API);
ExprResult
BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl, SourceLocation LParenLoc,
ArrayRef<Expr *> Arg, SourceLocation RParenLoc,
Expr *Config = nullptr, bool IsExecConfig = false,
ADLCallKind UsesADL = ADLCallKind::NotADL);
ExprResult ActOnCUDAExecConfigExpr(Scope *S, SourceLocation LLLLoc,
MultiExprArg ExecConfig,
SourceLocation GGGLoc);
ExprResult ActOnCastExpr(Scope *S, SourceLocation LParenLoc,
Declarator &D, ParsedType &Ty,
SourceLocation RParenLoc, Expr *CastExpr);
ExprResult BuildCStyleCastExpr(SourceLocation LParenLoc,
TypeSourceInfo *Ty,
SourceLocation RParenLoc,
Expr *Op);
CastKind PrepareScalarCast(ExprResult &src, QualType destType);
/// Build an AltiVec or OpenCL literal.
ExprResult BuildVectorLiteral(SourceLocation LParenLoc,
SourceLocation RParenLoc, Expr *E,
TypeSourceInfo *TInfo);
ExprResult MaybeConvertParenListExprToParenExpr(Scope *S, Expr *ME);
ExprResult ActOnCompoundLiteral(SourceLocation LParenLoc,
ParsedType Ty,
SourceLocation RParenLoc,
Expr *InitExpr);
ExprResult BuildCompoundLiteralExpr(SourceLocation LParenLoc,
TypeSourceInfo *TInfo,
SourceLocation RParenLoc,
Expr *LiteralExpr);
ExprResult ActOnInitList(SourceLocation LBraceLoc,
MultiExprArg InitArgList,
SourceLocation RBraceLoc);
ExprResult BuildInitList(SourceLocation LBraceLoc,
MultiExprArg InitArgList,
SourceLocation RBraceLoc);
ExprResult ActOnDesignatedInitializer(Designation &Desig,
SourceLocation EqualOrColonLoc,
bool GNUSyntax,
ExprResult Init);
private:
static BinaryOperatorKind ConvertTokenKindToBinaryOpcode(tok::TokenKind Kind);
public:
ExprResult ActOnBinOp(Scope *S, SourceLocation TokLoc,
tok::TokenKind Kind, Expr *LHSExpr, Expr *RHSExpr);
ExprResult BuildBinOp(Scope *S, SourceLocation OpLoc,
BinaryOperatorKind Opc, Expr *LHSExpr, Expr *RHSExpr);
ExprResult CreateBuiltinBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc,
Expr *LHSExpr, Expr *RHSExpr);
void LookupBinOp(Scope *S, SourceLocation OpLoc, BinaryOperatorKind Opc,
UnresolvedSetImpl &Functions);
void DiagnoseCommaOperator(const Expr *LHS, SourceLocation Loc);
/// ActOnConditionalOp - Parse a ?: operation. Note that 'LHS' may be null
/// in the case of the GNU conditional expression extension.
ExprResult ActOnConditionalOp(SourceLocation QuestionLoc,
SourceLocation ColonLoc,
Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr);
/// ActOnAddrLabel - Parse the GNU address of label extension: "&&foo".
ExprResult ActOnAddrLabel(SourceLocation OpLoc, SourceLocation LabLoc,
LabelDecl *TheDecl);
void ActOnStartStmtExpr();
ExprResult ActOnStmtExpr(Scope *S, SourceLocation LPLoc, Stmt *SubStmt,
SourceLocation RPLoc);
ExprResult BuildStmtExpr(SourceLocation LPLoc, Stmt *SubStmt,
SourceLocation RPLoc, unsigned TemplateDepth);
// Handle the final expression in a statement expression.
ExprResult ActOnStmtExprResult(ExprResult E);
void ActOnStmtExprError();
// __builtin_offsetof(type, identifier(.identifier|[expr])*)
struct OffsetOfComponent {
SourceLocation LocStart, LocEnd;
bool isBrackets; // true if [expr], false if .ident
union {
IdentifierInfo *IdentInfo;
Expr *E;
} U;
};
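// For illustration, parsing __builtin_offsetof(S, a.b[2]) yields three
// OffsetOfComponents: a field component for 'a' (isBrackets == false,
// U.IdentInfo == "a"), one for 'b', and an array component for the index
// (isBrackets == true, U.E == the expression '2').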
/// __builtin_offsetof(type, a.b[123][456].c)
ExprResult BuildBuiltinOffsetOf(SourceLocation BuiltinLoc,
TypeSourceInfo *TInfo,
ArrayRef<OffsetOfComponent> Components,
SourceLocation RParenLoc);
ExprResult ActOnBuiltinOffsetOf(Scope *S,
SourceLocation BuiltinLoc,
SourceLocation TypeLoc,
ParsedType ParsedArgTy,
ArrayRef<OffsetOfComponent> Components,
SourceLocation RParenLoc);
// __builtin_choose_expr(constExpr, expr1, expr2)
ExprResult ActOnChooseExpr(SourceLocation BuiltinLoc,
Expr *CondExpr, Expr *LHSExpr,
Expr *RHSExpr, SourceLocation RPLoc);
// __builtin_va_arg(expr, type)
ExprResult ActOnVAArg(SourceLocation BuiltinLoc, Expr *E, ParsedType Ty,
SourceLocation RPLoc);
ExprResult BuildVAArgExpr(SourceLocation BuiltinLoc, Expr *E,
TypeSourceInfo *TInfo, SourceLocation RPLoc);
// __builtin_LINE(), __builtin_FUNCTION(), __builtin_FILE(),
// __builtin_COLUMN()
ExprResult ActOnSourceLocExpr(SourceLocExpr::IdentKind Kind,
SourceLocation BuiltinLoc,
SourceLocation RPLoc);
// Build a potentially resolved SourceLocExpr.
ExprResult BuildSourceLocExpr(SourceLocExpr::IdentKind Kind,
SourceLocation BuiltinLoc, SourceLocation RPLoc,
DeclContext *ParentContext);
// __null
ExprResult ActOnGNUNullExpr(SourceLocation TokenLoc);
bool CheckCaseExpression(Expr *E);
/// Describes the result of an "if-exists" condition check.
enum IfExistsResult {
/// The symbol exists.
IER_Exists,
/// The symbol does not exist.
IER_DoesNotExist,
/// The name is a dependent name, so the results will differ
/// from one instantiation to the next.
IER_Dependent,
/// An error occurred.
IER_Error
};
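// For illustration (Microsoft extension): in
//   __if_exists(N::f) { void g() { N::f(); } }
// the check yields IER_Exists when N::f is found, IER_DoesNotExist when it is
// not, and IER_Dependent inside a template where N::f names a dependent
// entity, deferring the decision to instantiation.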
IfExistsResult
CheckMicrosoftIfExistsSymbol(Scope *S, CXXScopeSpec &SS,
const DeclarationNameInfo &TargetNameInfo);
IfExistsResult
CheckMicrosoftIfExistsSymbol(Scope *S, SourceLocation KeywordLoc,
bool IsIfExists, CXXScopeSpec &SS,
UnqualifiedId &Name);
StmtResult BuildMSDependentExistsStmt(SourceLocation KeywordLoc,
bool IsIfExists,
NestedNameSpecifierLoc QualifierLoc,
DeclarationNameInfo NameInfo,
Stmt *Nested);
StmtResult ActOnMSDependentExistsStmt(SourceLocation KeywordLoc,
bool IsIfExists,
CXXScopeSpec &SS, UnqualifiedId &Name,
Stmt *Nested);
//===------------------------- "Block" Extension ------------------------===//
/// ActOnBlockStart - This callback is invoked when a block literal is
/// started.
void ActOnBlockStart(SourceLocation CaretLoc, Scope *CurScope);
/// ActOnBlockArguments - This callback allows processing of block arguments.
/// If there are no arguments, this is still invoked.
void ActOnBlockArguments(SourceLocation CaretLoc, Declarator &ParamInfo,
Scope *CurScope);
/// ActOnBlockError - If there is an error parsing a block, this callback
/// is invoked to pop the information about the block from the action impl.
void ActOnBlockError(SourceLocation CaretLoc, Scope *CurScope);
/// ActOnBlockStmtExpr - This is called when the body of a block statement
/// literal was successfully completed. ^(int x){...}
ExprResult ActOnBlockStmtExpr(SourceLocation CaretLoc, Stmt *Body,
Scope *CurScope);
//===---------------------------- Clang Extensions ----------------------===//
/// __builtin_convertvector(...)
ExprResult ActOnConvertVectorExpr(Expr *E, ParsedType ParsedDestTy,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
//===---------------------------- OpenCL Features -----------------------===//
/// __builtin_astype(...)
ExprResult ActOnAsTypeExpr(Expr *E, ParsedType ParsedDestTy,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
ExprResult BuildAsTypeExpr(Expr *E, QualType DestTy,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
//===---------------------------- C++ Features --------------------------===//
// Act on C++ namespaces
Decl *ActOnStartNamespaceDef(Scope *S, SourceLocation InlineLoc,
SourceLocation NamespaceLoc,
SourceLocation IdentLoc, IdentifierInfo *Ident,
SourceLocation LBrace,
const ParsedAttributesView &AttrList,
UsingDirectiveDecl *&UsingDecl);
void ActOnFinishNamespaceDef(Decl *Dcl, SourceLocation RBrace);
NamespaceDecl *getStdNamespace() const;
NamespaceDecl *getOrCreateStdNamespace();
NamespaceDecl *lookupStdExperimentalNamespace();
NamespaceDecl *getCachedCoroNamespace() { return CoroTraitsNamespaceCache; }
CXXRecordDecl *getStdBadAlloc() const;
EnumDecl *getStdAlignValT() const;
private:
// A cache recording whether we've fully checked the various comparison
// category types stored in ASTContext. The bit-index corresponds to the
// integer value of a ComparisonCategoryType enumerator.
llvm::SmallBitVector FullyCheckedComparisonCategories;
ValueDecl *tryLookupCtorInitMemberDecl(CXXRecordDecl *ClassDecl,
CXXScopeSpec &SS,
ParsedType TemplateTypeTy,
IdentifierInfo *MemberOrBase);
public:
enum class ComparisonCategoryUsage {
/// The '<=>' operator was used in an expression and a builtin operator
/// was selected.
OperatorInExpression,
/// A defaulted 'operator<=>' needed the comparison category. This
/// typically only applies to 'std::strong_ordering', due to the implicit
/// fallback return value.
DefaultedOperator,
};
/// Look up the specified comparison category type in the standard
/// library and check the VarDecls possibly returned by the operator<=>
/// builtins for that type.
///
/// \return The comparison category type corresponding to the specified
/// Kind, or a null type if an error occurs.
QualType CheckComparisonCategoryType(ComparisonCategoryType Kind,
SourceLocation Loc,
ComparisonCategoryUsage Usage);
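// For illustration, a builtin '<=>' on two ints yields std::strong_ordering,
// so Sema would call CheckComparisonCategoryType with
// ComparisonCategoryType::StrongOrdering and
// ComparisonCategoryUsage::OperatorInExpression to locate the library type.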
/// Tests whether Ty is an instance of std::initializer_list and, if
/// it is and Element is not NULL, assigns the element type to Element.
bool isStdInitializerList(QualType Ty, QualType *Element);
/// Looks for the std::initializer_list template and instantiates it
/// with Element, or emits an error if it's not found.
///
/// \returns The instantiated template, or null on error.
QualType BuildStdInitializerList(QualType Element, SourceLocation Loc);
/// Determine whether Ctor is an initializer-list constructor, as
/// defined in [dcl.init.list]p2.
bool isInitListConstructor(const FunctionDecl *Ctor);
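// For illustration, given
//   struct S { S(std::initializer_list<int>); };
// isInitListConstructor returns true for that constructor, and
// isStdInitializerList on its parameter type reports an element type of
// 'int'.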
Decl *ActOnUsingDirective(Scope *CurScope, SourceLocation UsingLoc,
SourceLocation NamespcLoc, CXXScopeSpec &SS,
SourceLocation IdentLoc,
IdentifierInfo *NamespcName,
const ParsedAttributesView &AttrList);
void PushUsingDirective(Scope *S, UsingDirectiveDecl *UDir);
Decl *ActOnNamespaceAliasDef(Scope *CurScope,
SourceLocation NamespaceLoc,
SourceLocation AliasLoc,
IdentifierInfo *Alias,
CXXScopeSpec &SS,
SourceLocation IdentLoc,
IdentifierInfo *Ident);
void FilterUsingLookup(Scope *S, LookupResult &lookup);
void HideUsingShadowDecl(Scope *S, UsingShadowDecl *Shadow);
bool CheckUsingShadowDecl(BaseUsingDecl *BUD, NamedDecl *Target,
const LookupResult &PreviousDecls,
UsingShadowDecl *&PrevShadow);
UsingShadowDecl *BuildUsingShadowDecl(Scope *S, BaseUsingDecl *BUD,
NamedDecl *Target,
UsingShadowDecl *PrevDecl);
bool CheckUsingDeclRedeclaration(SourceLocation UsingLoc,
bool HasTypenameKeyword,
const CXXScopeSpec &SS,
SourceLocation NameLoc,
const LookupResult &Previous);
bool CheckUsingDeclQualifier(SourceLocation UsingLoc, bool HasTypename,
const CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
SourceLocation NameLoc,
const LookupResult *R = nullptr,
const UsingDecl *UD = nullptr);
NamedDecl *BuildUsingDeclaration(
Scope *S, AccessSpecifier AS, SourceLocation UsingLoc,
bool HasTypenameKeyword, SourceLocation TypenameLoc, CXXScopeSpec &SS,
DeclarationNameInfo NameInfo, SourceLocation EllipsisLoc,
const ParsedAttributesView &AttrList, bool IsInstantiation,
bool IsUsingIfExists);
NamedDecl *BuildUsingEnumDeclaration(Scope *S, AccessSpecifier AS,
SourceLocation UsingLoc,
SourceLocation EnumLoc,
SourceLocation NameLoc, EnumDecl *ED);
NamedDecl *BuildUsingPackDecl(NamedDecl *InstantiatedFrom,
ArrayRef<NamedDecl *> Expansions);
bool CheckInheritingConstructorUsingDecl(UsingDecl *UD);
/// Given a derived-class using shadow declaration for a constructor and the
/// corresponding base class constructor, find or create the implicit
/// synthesized derived class constructor to use for this initialization.
CXXConstructorDecl *
findInheritingConstructor(SourceLocation Loc, CXXConstructorDecl *BaseCtor,
ConstructorUsingShadowDecl *DerivedShadow);
Decl *ActOnUsingDeclaration(Scope *CurScope, AccessSpecifier AS,
SourceLocation UsingLoc,
SourceLocation TypenameLoc, CXXScopeSpec &SS,
UnqualifiedId &Name, SourceLocation EllipsisLoc,
const ParsedAttributesView &AttrList);
Decl *ActOnUsingEnumDeclaration(Scope *CurScope, AccessSpecifier AS,
SourceLocation UsingLoc,
SourceLocation EnumLoc, const DeclSpec &);
Decl *ActOnAliasDeclaration(Scope *CurScope, AccessSpecifier AS,
MultiTemplateParamsArg TemplateParams,
SourceLocation UsingLoc, UnqualifiedId &Name,
const ParsedAttributesView &AttrList,
TypeResult Type, Decl *DeclFromDeclSpec);
/// BuildCXXConstructExpr - Creates a complete call to a constructor,
/// including handling of its default argument expressions.
///
/// \param ConstructKind - a CXXConstructExpr::ConstructionKind
ExprResult
BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
NamedDecl *FoundDecl,
CXXConstructorDecl *Constructor, MultiExprArg Exprs,
bool HadMultipleCandidates, bool IsListInitialization,
bool IsStdInitListInitialization,
bool RequiresZeroInit, unsigned ConstructKind,
SourceRange ParenRange);
/// Build a CXXConstructExpr whose constructor has already been resolved if
/// it denotes an inherited constructor.
ExprResult
BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
CXXConstructorDecl *Constructor, bool Elidable,
MultiExprArg Exprs,
bool HadMultipleCandidates, bool IsListInitialization,
bool IsStdInitListInitialization,
bool RequiresZeroInit, unsigned ConstructKind,
SourceRange ParenRange);
// FIXME: Can we remove this and have the above BuildCXXConstructExpr check if
// the constructor can be elidable?
ExprResult
BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
NamedDecl *FoundDecl,
CXXConstructorDecl *Constructor, bool Elidable,
MultiExprArg Exprs, bool HadMultipleCandidates,
bool IsListInitialization,
bool IsStdInitListInitialization, bool RequiresZeroInit,
unsigned ConstructKind, SourceRange ParenRange);
ExprResult BuildCXXDefaultInitExpr(SourceLocation Loc, FieldDecl *Field);
/// Instantiate or parse a C++ default argument expression as necessary.
/// Return true on error.
bool CheckCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD,
ParmVarDecl *Param);
/// BuildCXXDefaultArgExpr - Creates a CXXDefaultArgExpr, instantiating
/// the default expr if needed.
ExprResult BuildCXXDefaultArgExpr(SourceLocation CallLoc,
FunctionDecl *FD,
ParmVarDecl *Param);
/// FinalizeVarWithDestructor - Prepare for calling destructor on the
/// constructed variable.
void FinalizeVarWithDestructor(VarDecl *VD, const RecordType *DeclInitType);
/// Helper class that collects exception specifications for
/// implicitly-declared special member functions.
class ImplicitExceptionSpecification {
// Pointer to allow copying
Sema *Self;
// We order exception specifications thus:
// noexcept is the most restrictive, but is only used in C++11.
// throw() comes next.
// Then a throw(collected exceptions).
// Finally, no specification, which is expressed as noexcept(false).
// throw(...) is used instead if any called function uses it.
ExceptionSpecificationType ComputedEST;
llvm::SmallPtrSet<CanQualType, 4> ExceptionsSeen;
SmallVector<QualType, 4> Exceptions;
void ClearExceptions() {
ExceptionsSeen.clear();
Exceptions.clear();
}
public:
explicit ImplicitExceptionSpecification(Sema &Self)
: Self(&Self), ComputedEST(EST_BasicNoexcept) {
if (!Self.getLangOpts().CPlusPlus11)
ComputedEST = EST_DynamicNone;
}
/// Get the computed exception specification type.
ExceptionSpecificationType getExceptionSpecType() const {
assert(!isComputedNoexcept(ComputedEST) &&
"noexcept(expr) should not be a possible result");
return ComputedEST;
}
/// The number of exceptions in the exception specification.
unsigned size() const { return Exceptions.size(); }
/// The set of exceptions in the exception specification.
const QualType *data() const { return Exceptions.data(); }
/// Integrate another called method into the collected data.
void CalledDecl(SourceLocation CallLoc, const CXXMethodDecl *Method);
/// Integrate an invoked expression into the collected data.
void CalledExpr(Expr *E) { CalledStmt(E); }
/// Integrate an invoked statement into the collected data.
void CalledStmt(Stmt *S);
/// Overwrite an EPI's exception specification with this
/// computed exception specification.
FunctionProtoType::ExceptionSpecInfo getExceptionSpec() const {
FunctionProtoType::ExceptionSpecInfo ESI;
ESI.Type = getExceptionSpecType();
if (ESI.Type == EST_Dynamic) {
ESI.Exceptions = Exceptions;
} else if (ESI.Type == EST_None) {
/// C++11 [except.spec]p14:
/// The exception-specification is noexcept(false) if the set of
/// potential exceptions of the special member function contains "any"
ESI.Type = EST_NoexceptFalse;
ESI.NoexceptExpr = Self->ActOnCXXBoolLiteral(SourceLocation(),
tok::kw_false).get();
}
return ESI;
}
};
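// A minimal sketch of the intended use of the class above, assuming a
// hypothetical list of special members invoked by the implicit definition:
//   ImplicitExceptionSpecification Spec(SemaRef);
//   for (const CXXMethodDecl *M : CalledSpecialMembers)  // hypothetical
//     Spec.CalledDecl(Loc, M);
//   FunctionProtoType::ExceptionSpecInfo ESI = Spec.getExceptionSpec();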
/// Evaluate the implicit exception specification for a defaulted
/// special member function.
void EvaluateImplicitExceptionSpec(SourceLocation Loc, FunctionDecl *FD);
/// Check the given noexcept-specifier, convert its expression, and compute
/// the appropriate ExceptionSpecificationType.
ExprResult ActOnNoexceptSpec(Expr *NoexceptExpr,
ExceptionSpecificationType &EST);
/// Check the given exception-specification and update the
/// exception specification information with the results.
void checkExceptionSpecification(bool IsTopLevel,
ExceptionSpecificationType EST,
ArrayRef<ParsedType> DynamicExceptions,
ArrayRef<SourceRange> DynamicExceptionRanges,
Expr *NoexceptExpr,
SmallVectorImpl<QualType> &Exceptions,
FunctionProtoType::ExceptionSpecInfo &ESI);
/// Determine if we're in a case where we need to (incorrectly) eagerly
/// parse an exception specification to work around a libstdc++ bug.
bool isLibstdcxxEagerExceptionSpecHack(const Declarator &D);
/// Add an exception-specification to the given member function
/// (or member function template). The exception-specification was parsed
/// after the method itself was declared.
void actOnDelayedExceptionSpecification(Decl *Method,
ExceptionSpecificationType EST,
SourceRange SpecificationRange,
ArrayRef<ParsedType> DynamicExceptions,
ArrayRef<SourceRange> DynamicExceptionRanges,
Expr *NoexceptExpr);
class InheritedConstructorInfo;
/// Determine if a special member function should have a deleted
/// definition when it is defaulted.
bool ShouldDeleteSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM,
InheritedConstructorInfo *ICI = nullptr,
bool Diagnose = false);
/// Produce notes explaining why a defaulted function was defined as deleted.
void DiagnoseDeletedDefaultedFunction(FunctionDecl *FD);
/// Declare the implicit default constructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// default constructor will be added.
///
/// \returns The implicitly-declared default constructor.
CXXConstructorDecl *DeclareImplicitDefaultConstructor(
CXXRecordDecl *ClassDecl);
/// DefineImplicitDefaultConstructor - Checks for feasibility of
/// defining this constructor as the default constructor.
void DefineImplicitDefaultConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// Declare the implicit destructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// destructor will be added.
///
/// \returns The implicitly-declared destructor.
CXXDestructorDecl *DeclareImplicitDestructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitDestructor - Checks for feasibility of
/// defining this destructor as the default destructor.
void DefineImplicitDestructor(SourceLocation CurrentLocation,
CXXDestructorDecl *Destructor);
/// Build an exception spec for destructors that don't have one.
///
/// C++11 says that user-defined destructors with no exception spec get one
/// that looks as if the destructor was implicitly declared.
void AdjustDestructorExceptionSpec(CXXDestructorDecl *Destructor);
/// Define the specified inheriting constructor.
void DefineInheritingConstructor(SourceLocation UseLoc,
CXXConstructorDecl *Constructor);
/// Declare the implicit copy constructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// copy constructor will be added.
///
/// \returns The implicitly-declared copy constructor.
CXXConstructorDecl *DeclareImplicitCopyConstructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitCopyConstructor - Checks for feasibility of
/// defining this constructor as the copy constructor.
void DefineImplicitCopyConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// Declare the implicit move constructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// move constructor will be added.
///
/// \returns The implicitly-declared move constructor, or NULL if it wasn't
/// declared.
CXXConstructorDecl *DeclareImplicitMoveConstructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitMoveConstructor - Checks for feasibility of
/// defining this constructor as the move constructor.
void DefineImplicitMoveConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// Declare the implicit copy assignment operator for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// copy assignment operator will be added.
///
/// \returns The implicitly-declared copy assignment operator.
CXXMethodDecl *DeclareImplicitCopyAssignment(CXXRecordDecl *ClassDecl);
/// Defines an implicitly-declared copy assignment operator.
void DefineImplicitCopyAssignment(SourceLocation CurrentLocation,
CXXMethodDecl *MethodDecl);
/// Declare the implicit move assignment operator for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// move assignment operator will be added.
///
/// \returns The implicitly-declared move assignment operator, or NULL if it
/// wasn't declared.
CXXMethodDecl *DeclareImplicitMoveAssignment(CXXRecordDecl *ClassDecl);
/// Defines an implicitly-declared move assignment operator.
void DefineImplicitMoveAssignment(SourceLocation CurrentLocation,
CXXMethodDecl *MethodDecl);
/// Force the declaration of any implicitly-declared members of this
/// class.
void ForceDeclarationOfImplicitMembers(CXXRecordDecl *Class);
/// Check a completed declaration of an implicit special member.
void CheckImplicitSpecialMemberDeclaration(Scope *S, FunctionDecl *FD);
/// Determine whether the given function is an implicitly-deleted
/// special member function.
bool isImplicitlyDeleted(FunctionDecl *FD);
/// Check whether 'this' shows up in the type of a static member
/// function after the (naturally empty) cv-qualifier-seq would be.
///
/// \returns true if an error occurred.
bool checkThisInStaticMemberFunctionType(CXXMethodDecl *Method);
/// Check whether 'this' shows up in the exception specification of a
/// static member function.
bool checkThisInStaticMemberFunctionExceptionSpec(CXXMethodDecl *Method);
/// Check whether 'this' shows up in the attributes of the given
/// static member function.
///
/// \returns true if an error occurred.
bool checkThisInStaticMemberFunctionAttributes(CXXMethodDecl *Method);
/// MaybeBindToTemporary - If the passed-in expression has a record type with
/// a non-trivial destructor, this will return a CXXBindTemporaryExpr.
/// Otherwise it simply returns the passed-in expression.
ExprResult MaybeBindToTemporary(Expr *E);
/// Wrap the expression in a ConstantExpr if it is a potential immediate
/// invocation.
ExprResult CheckForImmediateInvocation(ExprResult E, FunctionDecl *Decl);
bool CompleteConstructorCall(CXXConstructorDecl *Constructor,
QualType DeclInitType, MultiExprArg ArgsPtr,
SourceLocation Loc,
SmallVectorImpl<Expr *> &ConvertedArgs,
bool AllowExplicit = false,
bool IsListInitialization = false);
ParsedType getInheritingConstructorName(CXXScopeSpec &SS,
SourceLocation NameLoc,
IdentifierInfo &Name);
ParsedType getConstructorName(IdentifierInfo &II, SourceLocation NameLoc,
Scope *S, CXXScopeSpec &SS,
bool EnteringContext);
ParsedType getDestructorName(SourceLocation TildeLoc,
IdentifierInfo &II, SourceLocation NameLoc,
Scope *S, CXXScopeSpec &SS,
ParsedType ObjectType,
bool EnteringContext);
ParsedType getDestructorTypeForDecltype(const DeclSpec &DS,
ParsedType ObjectType);
// Checks that reinterpret casts don't have undefined behavior.
void CheckCompatibleReinterpretCast(QualType SrcType, QualType DestType,
bool IsDereference, SourceRange Range);
// Checks that the vector type should be initialized from a scalar
// by splatting the value rather than populating a single element.
// This is the case for AltiVecVector types as well as with
// AltiVecPixel and AltiVecBool when -faltivec-src-compat=xl is specified.
bool ShouldSplatAltivecScalarInCast(const VectorType *VecTy);
// Checks if the -faltivec-src-compat=gcc option is specified.
// If so, AltiVecVector, AltiVecBool and AltiVecPixel types are
// treated the same way as they are when trying to initialize
// these vectors on gcc (an error is emitted).
bool CheckAltivecInitFromScalar(SourceRange R, QualType VecTy,
QualType SrcTy);
/// ActOnCXXNamedCast - Parse
/// {dynamic,static,reinterpret,const,addrspace}_cast's.
ExprResult ActOnCXXNamedCast(SourceLocation OpLoc,
tok::TokenKind Kind,
SourceLocation LAngleBracketLoc,
Declarator &D,
SourceLocation RAngleBracketLoc,
SourceLocation LParenLoc,
Expr *E,
SourceLocation RParenLoc);
ExprResult BuildCXXNamedCast(SourceLocation OpLoc,
tok::TokenKind Kind,
TypeSourceInfo *Ty,
Expr *E,
SourceRange AngleBrackets,
SourceRange Parens);
ExprResult ActOnBuiltinBitCastExpr(SourceLocation KWLoc, Declarator &Dcl,
ExprResult Operand,
SourceLocation RParenLoc);
ExprResult BuildBuiltinBitCastExpr(SourceLocation KWLoc, TypeSourceInfo *TSI,
Expr *Operand, SourceLocation RParenLoc);
ExprResult BuildCXXTypeId(QualType TypeInfoType,
SourceLocation TypeidLoc,
TypeSourceInfo *Operand,
SourceLocation RParenLoc);
ExprResult BuildCXXTypeId(QualType TypeInfoType,
SourceLocation TypeidLoc,
Expr *Operand,
SourceLocation RParenLoc);
/// ActOnCXXTypeid - Parse typeid( something ).
ExprResult ActOnCXXTypeid(SourceLocation OpLoc,
SourceLocation LParenLoc, bool isType,
void *TyOrExpr,
SourceLocation RParenLoc);
ExprResult BuildCXXUuidof(QualType TypeInfoType,
SourceLocation TypeidLoc,
TypeSourceInfo *Operand,
SourceLocation RParenLoc);
ExprResult BuildCXXUuidof(QualType TypeInfoType,
SourceLocation TypeidLoc,
Expr *Operand,
SourceLocation RParenLoc);
/// ActOnCXXUuidof - Parse __uuidof( something ).
ExprResult ActOnCXXUuidof(SourceLocation OpLoc,
SourceLocation LParenLoc, bool isType,
void *TyOrExpr,
SourceLocation RParenLoc);
/// Handle a C++1z fold-expression: ( expr op ... op expr ).
ExprResult ActOnCXXFoldExpr(Scope *S, SourceLocation LParenLoc, Expr *LHS,
tok::TokenKind Operator,
SourceLocation EllipsisLoc, Expr *RHS,
SourceLocation RParenLoc);
ExprResult BuildCXXFoldExpr(UnresolvedLookupExpr *Callee,
SourceLocation LParenLoc, Expr *LHS,
BinaryOperatorKind Operator,
SourceLocation EllipsisLoc, Expr *RHS,
SourceLocation RParenLoc,
Optional<unsigned> NumExpansions);
ExprResult BuildEmptyCXXFoldExpr(SourceLocation EllipsisLoc,
BinaryOperatorKind Operator);
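// For illustration, the fold-expressions handled above include:
//   (args + ...)      // unary right fold
//   (... + args)      // unary left fold
//   (args + ... + 0)  // binary right fold
// BuildEmptyCXXFoldExpr supplies the value for an empty pack in the unary
// forms that permit one: 'true' for '&&', 'false' for '||', 'void()' for ','.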
/// ActOnCXXThis - Parse 'this' pointer.
ExprResult ActOnCXXThis(SourceLocation loc);
/// Build a CXXThisExpr and mark it referenced in the current context.
Expr *BuildCXXThisExpr(SourceLocation Loc, QualType Type, bool IsImplicit);
void MarkThisReferenced(CXXThisExpr *This);
/// Try to retrieve the type of the 'this' pointer.
///
/// \returns The type of 'this', if possible. Otherwise, returns a NULL type.
QualType getCurrentThisType();
/// When non-NULL, the C++ 'this' expression is allowed despite the
/// current context not being a non-static member function. In such cases,
/// this provides the type used for 'this'.
QualType CXXThisTypeOverride;
/// RAII object used to temporarily allow the C++ 'this' expression
/// to be used, with the given qualifiers on the current class type.
class CXXThisScopeRAII {
Sema &S;
QualType OldCXXThisTypeOverride;
bool Enabled;
public:
/// Introduce a new scope where 'this' may be allowed (when enabled),
/// using the given declaration (which is either a class template or a
/// class), along with the qualifiers placed on '*this'.
CXXThisScopeRAII(Sema &S, Decl *ContextDecl, Qualifiers CXXThisTypeQuals,
bool Enabled = true);
~CXXThisScopeRAII();
};
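// A minimal sketch of the RAII class above at a hypothetical call site,
// allowing 'this' while processing a declaration of class RD outside any
// member function body:
//   {
//     CXXThisScopeRAII ThisScope(SemaRef, RD, /*CXXThisTypeQuals=*/Qualifiers());
//     // ... 'this' now has type 'RD *'; the override is restored on exit.
//   }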
/// Make sure the value of 'this' is actually available in the current
/// context, if it is a potentially evaluated context.
///
/// \param Loc The location at which the capture of 'this' occurs.
///
/// \param Explicit Whether 'this' is explicitly captured in a lambda
/// capture list.
///
/// \param FunctionScopeIndexToStopAt If non-null, it points to the index
/// of the FunctionScopeInfo stack beyond which we do not attempt to capture.
/// This is useful when enclosing lambdas must speculatively capture
/// 'this' that may or may not be used in certain specializations of
/// a nested generic lambda (depending on whether the name resolves to
/// a non-static member function or a static function).
/// \return 'true' if the capture failed, 'false' otherwise.
bool CheckCXXThisCapture(SourceLocation Loc, bool Explicit = false,
bool BuildAndDiagnose = true,
const unsigned *const FunctionScopeIndexToStopAt = nullptr,
bool ByCopy = false);
/// Determine whether the given type is the type of *this that is used
/// outside of the body of a member function for a type that is currently
/// being defined.
bool isThisOutsideMemberFunctionBody(QualType BaseType);
/// ActOnCXXBoolLiteral - Parse {true,false} literals.
ExprResult ActOnCXXBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind);
/// ActOnObjCBoolLiteral - Parse {__objc_yes,__objc_no} literals.
ExprResult ActOnObjCBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind);
ExprResult
ActOnObjCAvailabilityCheckExpr(llvm::ArrayRef<AvailabilitySpec> AvailSpecs,
SourceLocation AtLoc, SourceLocation RParen);
/// ActOnCXXNullPtrLiteral - Parse 'nullptr'.
ExprResult ActOnCXXNullPtrLiteral(SourceLocation Loc);
/// ActOnCXXThrow - Parse throw expressions.
ExprResult ActOnCXXThrow(Scope *S, SourceLocation OpLoc, Expr *expr);
ExprResult BuildCXXThrow(SourceLocation OpLoc, Expr *Ex,
bool IsThrownVarInScope);
bool CheckCXXThrowOperand(SourceLocation ThrowLoc, QualType ThrowTy, Expr *E);
/// ActOnCXXTypeConstructExpr - Parse construction of a specified type.
/// Can be interpreted either as function-style casting ("int(x)")
/// or class type construction ("ClassType(x,y,z)")
/// or creation of a value-initialized type ("int()").
ExprResult ActOnCXXTypeConstructExpr(ParsedType TypeRep,
SourceLocation LParenOrBraceLoc,
MultiExprArg Exprs,
SourceLocation RParenOrBraceLoc,
bool ListInitialization);
ExprResult BuildCXXTypeConstructExpr(TypeSourceInfo *Type,
SourceLocation LParenLoc,
MultiExprArg Exprs,
SourceLocation RParenLoc,
bool ListInitialization);
/// ActOnCXXNew - Parsed a C++ 'new' expression.
ExprResult ActOnCXXNew(SourceLocation StartLoc, bool UseGlobal,
SourceLocation PlacementLParen,
MultiExprArg PlacementArgs,
SourceLocation PlacementRParen,
SourceRange TypeIdParens, Declarator &D,
Expr *Initializer);
ExprResult BuildCXXNew(SourceRange Range, bool UseGlobal,
SourceLocation PlacementLParen,
MultiExprArg PlacementArgs,
SourceLocation PlacementRParen,
SourceRange TypeIdParens,
QualType AllocType,
TypeSourceInfo *AllocTypeInfo,
Optional<Expr *> ArraySize,
SourceRange DirectInitRange,
Expr *Initializer);
/// Determine whether \p FD is an aligned allocation or deallocation
/// function that is unavailable.
bool isUnavailableAlignedAllocationFunction(const FunctionDecl &FD) const;
/// Produce diagnostics if \p FD is an aligned allocation or deallocation
/// function that is unavailable.
void diagnoseUnavailableAlignedAllocation(const FunctionDecl &FD,
SourceLocation Loc);
bool CheckAllocatedType(QualType AllocType, SourceLocation Loc,
SourceRange R);
/// The scope in which to find allocation functions.
enum AllocationFunctionScope {
/// Only look for allocation functions in the global scope.
AFS_Global,
/// Only look for allocation functions in the scope of the
/// allocated class.
AFS_Class,
/// Look for allocation functions in both the global scope
/// and in the scope of the allocated class.
AFS_Both
};
/// Finds the overloads of operator new and delete that are appropriate
/// for the allocation.
bool FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range,
AllocationFunctionScope NewScope,
AllocationFunctionScope DeleteScope,
QualType AllocType, bool IsArray,
bool &PassAlignment, MultiExprArg PlaceArgs,
FunctionDecl *&OperatorNew,
FunctionDecl *&OperatorDelete,
bool Diagnose = true);
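// For illustration: '::new T()' restricts the search to AFS_Global, while a
// plain 'new T()' for a class type may consider T's own 'operator new' before
// the global one, so NewScope is widened accordingly (AFS_Class or AFS_Both).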
void DeclareGlobalNewDelete();
void DeclareGlobalAllocationFunction(DeclarationName Name, QualType Return,
ArrayRef<QualType> Params);
bool FindDeallocationFunction(SourceLocation StartLoc, CXXRecordDecl *RD,
DeclarationName Name, FunctionDecl* &Operator,
bool Diagnose = true);
FunctionDecl *FindUsualDeallocationFunction(SourceLocation StartLoc,
bool CanProvideSize,
bool Overaligned,
DeclarationName Name);
FunctionDecl *FindDeallocationFunctionForDestructor(SourceLocation StartLoc,
CXXRecordDecl *RD);
/// ActOnCXXDelete - Parsed a C++ 'delete' expression
ExprResult ActOnCXXDelete(SourceLocation StartLoc,
bool UseGlobal, bool ArrayForm,
Expr *Operand);
void CheckVirtualDtorCall(CXXDestructorDecl *dtor, SourceLocation Loc,
bool IsDelete, bool CallCanBeVirtual,
bool WarnOnNonAbstractTypes,
SourceLocation DtorLoc);
ExprResult ActOnNoexceptExpr(SourceLocation KeyLoc, SourceLocation LParen,
Expr *Operand, SourceLocation RParen);
ExprResult BuildCXXNoexceptExpr(SourceLocation KeyLoc, Expr *Operand,
SourceLocation RParen);
/// Parsed one of the type trait support pseudo-functions.
ExprResult ActOnTypeTrait(TypeTrait Kind, SourceLocation KWLoc,
ArrayRef<ParsedType> Args,
SourceLocation RParenLoc);
ExprResult BuildTypeTrait(TypeTrait Kind, SourceLocation KWLoc,
ArrayRef<TypeSourceInfo *> Args,
SourceLocation RParenLoc);
/// ActOnArrayTypeTrait - Parsed one of the array type trait support
/// pseudo-functions.
ExprResult ActOnArrayTypeTrait(ArrayTypeTrait ATT,
SourceLocation KWLoc,
ParsedType LhsTy,
Expr *DimExpr,
SourceLocation RParen);
ExprResult BuildArrayTypeTrait(ArrayTypeTrait ATT,
SourceLocation KWLoc,
TypeSourceInfo *TSInfo,
Expr *DimExpr,
SourceLocation RParen);
/// ActOnExpressionTrait - Parsed one of the unary expression trait support
/// pseudo-functions.
ExprResult ActOnExpressionTrait(ExpressionTrait OET,
SourceLocation KWLoc,
Expr *Queried,
SourceLocation RParen);
ExprResult BuildExpressionTrait(ExpressionTrait OET,
SourceLocation KWLoc,
Expr *Queried,
SourceLocation RParen);
ExprResult ActOnStartCXXMemberReference(Scope *S,
Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
ParsedType &ObjectType,
bool &MayBePseudoDestructor);
ExprResult BuildPseudoDestructorExpr(Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
const CXXScopeSpec &SS,
TypeSourceInfo *ScopeType,
SourceLocation CCLoc,
SourceLocation TildeLoc,
PseudoDestructorTypeStorage DestroyedType);
ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
CXXScopeSpec &SS,
UnqualifiedId &FirstTypeName,
SourceLocation CCLoc,
SourceLocation TildeLoc,
UnqualifiedId &SecondTypeName);
ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
SourceLocation TildeLoc,
const DeclSpec& DS);
/// MaybeCreateExprWithCleanups - If the current full-expression
/// requires any cleanups, surround it with an ExprWithCleanups node.
/// Otherwise, just returns the passed-in expression.
Expr *MaybeCreateExprWithCleanups(Expr *SubExpr);
Stmt *MaybeCreateStmtWithCleanups(Stmt *SubStmt);
ExprResult MaybeCreateExprWithCleanups(ExprResult SubExpr);
MaterializeTemporaryExpr *
CreateMaterializeTemporaryExpr(QualType T, Expr *Temporary,
bool BoundToLvalueReference);
ExprResult ActOnFinishFullExpr(Expr *Expr, bool DiscardedValue) {
return ActOnFinishFullExpr(
Expr, Expr ? Expr->getExprLoc() : SourceLocation(), DiscardedValue);
}
ExprResult ActOnFinishFullExpr(Expr *Expr, SourceLocation CC,
bool DiscardedValue, bool IsConstexpr = false);
StmtResult ActOnFinishFullStmt(Stmt *Stmt);
// Marks SS invalid if it represents an incomplete type.
bool RequireCompleteDeclContext(CXXScopeSpec &SS, DeclContext *DC);
// Complete an enum decl, maybe without a scope spec.
bool RequireCompleteEnumDecl(EnumDecl *D, SourceLocation L,
CXXScopeSpec *SS = nullptr);
DeclContext *computeDeclContext(QualType T);
DeclContext *computeDeclContext(const CXXScopeSpec &SS,
bool EnteringContext = false);
bool isDependentScopeSpecifier(const CXXScopeSpec &SS);
CXXRecordDecl *getCurrentInstantiationOf(NestedNameSpecifier *NNS);
/// The parser has parsed a global nested-name-specifier '::'.
///
/// \param CCLoc The location of the '::'.
///
/// \param SS The nested-name-specifier, which will be updated in-place
/// to reflect the parsed nested-name-specifier.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXGlobalScopeSpecifier(SourceLocation CCLoc, CXXScopeSpec &SS);
/// The parser has parsed a '__super' nested-name-specifier.
///
/// \param SuperLoc The location of the '__super' keyword.
///
/// \param ColonColonLoc The location of the '::'.
///
/// \param SS The nested-name-specifier, which will be updated in-place
/// to reflect the parsed nested-name-specifier.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnSuperScopeSpecifier(SourceLocation SuperLoc,
SourceLocation ColonColonLoc, CXXScopeSpec &SS);
bool isAcceptableNestedNameSpecifier(const NamedDecl *SD,
bool *CanCorrect = nullptr);
NamedDecl *FindFirstQualifierInScope(Scope *S, NestedNameSpecifier *NNS);
/// Keeps information about an identifier in a nested-name-spec.
///
struct NestedNameSpecInfo {
/// The type of the object, if we're parsing nested-name-specifier in
/// a member access expression.
ParsedType ObjectType;
/// The identifier preceding the '::'.
IdentifierInfo *Identifier;
/// The location of the identifier.
SourceLocation IdentifierLoc;
/// The location of the '::'.
SourceLocation CCLoc;
/// Creates info object for the most typical case.
NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc,
SourceLocation ColonColonLoc, ParsedType ObjectType = ParsedType())
: ObjectType(ObjectType), Identifier(II), IdentifierLoc(IdLoc),
CCLoc(ColonColonLoc) {
}
NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc,
SourceLocation ColonColonLoc, QualType ObjectType)
: ObjectType(ParsedType::make(ObjectType)), Identifier(II),
IdentifierLoc(IdLoc), CCLoc(ColonColonLoc) {
}
};
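// For illustration, while parsing 'A::B::x' the parser builds a
// NestedNameSpecInfo for 'A' (Identifier == "A", CCLoc at the first '::'),
// hands it to ActOnCXXNestedNameSpecifier, and then repeats the process for
// 'B' with the partially-built CXXScopeSpec as input.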
bool isNonTypeNestedNameSpecifier(Scope *S, CXXScopeSpec &SS,
NestedNameSpecInfo &IdInfo);
bool BuildCXXNestedNameSpecifier(Scope *S,
NestedNameSpecInfo &IdInfo,
bool EnteringContext,
CXXScopeSpec &SS,
NamedDecl *ScopeLookupResult,
bool ErrorRecoveryLookup,
bool *IsCorrectedToColon = nullptr,
bool OnlyNamespace = false);
/// The parser has parsed a nested-name-specifier 'identifier::'.
///
/// \param S The scope in which this nested-name-specifier occurs.
///
/// \param IdInfo Parser information about an identifier in the
/// nested-name-spec.
///
/// \param EnteringContext Whether we're entering the context nominated by
/// this nested-name-specifier.
///
/// \param SS The nested-name-specifier, which is both an input
/// parameter (the nested-name-specifier before this type) and an
/// output parameter (containing the full nested-name-specifier,
/// including this new type).
///
/// \param ErrorRecoveryLookup If true, then this method is called to improve
/// error recovery; in that case, no error message is emitted.
///
/// \param IsCorrectedToColon If not null, suggestions to replace '::' -> ':'
/// are allowed. The bool value pointed to by this parameter is set to 'true'
/// if the identifier is treated as if it were followed by ':', not '::'.
///
/// \param OnlyNamespace If true, only considers namespaces in lookup.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXNestedNameSpecifier(Scope *S,
NestedNameSpecInfo &IdInfo,
bool EnteringContext,
CXXScopeSpec &SS,
bool ErrorRecoveryLookup = false,
bool *IsCorrectedToColon = nullptr,
bool OnlyNamespace = false);
ExprResult ActOnDecltypeExpression(Expr *E);
bool ActOnCXXNestedNameSpecifierDecltype(CXXScopeSpec &SS,
const DeclSpec &DS,
SourceLocation ColonColonLoc);
bool IsInvalidUnlessNestedName(Scope *S, CXXScopeSpec &SS,
NestedNameSpecInfo &IdInfo,
bool EnteringContext);
/// The parser has parsed a nested-name-specifier
/// 'template[opt] template-name < template-args >::'.
///
/// \param S The scope in which this nested-name-specifier occurs.
///
/// \param SS The nested-name-specifier, which is both an input
/// parameter (the nested-name-specifier before this type) and an
/// output parameter (containing the full nested-name-specifier,
/// including this new type).
///
/// \param TemplateKWLoc The location of the 'template' keyword, if any.
/// \param TemplateName The template name.
/// \param TemplateNameLoc The location of the template name.
/// \param LAngleLoc The location of the opening angle bracket ('<').
/// \param TemplateArgs The template arguments.
/// \param RAngleLoc The location of the closing angle bracket ('>').
/// \param CCLoc The location of the '::'.
///
/// \param EnteringContext Whether we're entering the context of the
/// nested-name-specifier.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXNestedNameSpecifier(Scope *S,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
TemplateTy TemplateName,
SourceLocation TemplateNameLoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc,
SourceLocation CCLoc,
bool EnteringContext);
/// Given a C++ nested-name-specifier, produce an annotation value
/// that the parser can use later to reconstruct the given
/// nested-name-specifier.
///
/// \param SS A nested-name-specifier.
///
/// \returns A pointer containing all of the information in the
/// nested-name-specifier \p SS.
void *SaveNestedNameSpecifierAnnotation(CXXScopeSpec &SS);
/// Given an annotation pointer for a nested-name-specifier, restore
/// the nested-name-specifier structure.
///
/// \param Annotation The annotation pointer, produced by
/// \c SaveNestedNameSpecifierAnnotation().
///
/// \param AnnotationRange The source range corresponding to the annotation.
///
/// \param SS The nested-name-specifier that will be updated with the contents
/// of the annotation pointer.
void RestoreNestedNameSpecifierAnnotation(void *Annotation,
SourceRange AnnotationRange,
CXXScopeSpec &SS);
bool ShouldEnterDeclaratorScope(Scope *S, const CXXScopeSpec &SS);
/// ActOnCXXEnterDeclaratorScope - Called when a C++ scope specifier (global
/// scope or nested-name-specifier) is parsed, part of a declarator-id.
/// After this method is called, according to [C++ 3.4.3p3], names should be
/// looked up in the declarator-id's scope, until the declarator is parsed and
/// ActOnCXXExitDeclaratorScope is called.
/// The 'SS' should be a non-empty valid CXXScopeSpec.
bool ActOnCXXEnterDeclaratorScope(Scope *S, CXXScopeSpec &SS);
/// ActOnCXXExitDeclaratorScope - Called when a declarator that previously
/// invoked ActOnCXXEnterDeclaratorScope(), is finished. 'SS' is the same
/// CXXScopeSpec that was passed to ActOnCXXEnterDeclaratorScope as well.
/// Used to indicate that names should revert to being looked up in the
/// defining scope.
void ActOnCXXExitDeclaratorScope(Scope *S, const CXXScopeSpec &SS);
/// ActOnCXXEnterDeclInitializer - Invoked when we are about to parse an
/// initializer for the declaration 'Dcl'.
/// After this method is called, according to [C++ 3.4.1p13], if 'Dcl' is a
/// static data member of class X, names should be looked up in the scope of
/// class X.
void ActOnCXXEnterDeclInitializer(Scope *S, Decl *Dcl);
/// ActOnCXXExitDeclInitializer - Invoked after we are finished parsing an
/// initializer for the declaration 'Dcl'.
void ActOnCXXExitDeclInitializer(Scope *S, Decl *Dcl);
/// Create a new lambda closure type.
CXXRecordDecl *createLambdaClosureType(SourceRange IntroducerRange,
TypeSourceInfo *Info,
bool KnownDependent,
LambdaCaptureDefault CaptureDefault);
/// Start the definition of a lambda expression.
CXXMethodDecl *startLambdaDefinition(CXXRecordDecl *Class,
SourceRange IntroducerRange,
TypeSourceInfo *MethodType,
SourceLocation EndLoc,
ArrayRef<ParmVarDecl *> Params,
ConstexprSpecKind ConstexprKind,
Expr *TrailingRequiresClause);
/// Number lambda for linkage purposes if necessary.
void handleLambdaNumbering(
CXXRecordDecl *Class, CXXMethodDecl *Method,
Optional<std::tuple<bool, unsigned, unsigned, Decl *>> Mangling = None);
/// Endow the lambda scope info with the relevant properties.
void buildLambdaScope(sema::LambdaScopeInfo *LSI,
CXXMethodDecl *CallOperator,
SourceRange IntroducerRange,
LambdaCaptureDefault CaptureDefault,
SourceLocation CaptureDefaultLoc,
bool ExplicitParams,
bool ExplicitResultType,
bool Mutable);
/// Perform initialization analysis of the init-capture and perform
/// any implicit conversions such as an lvalue-to-rvalue conversion if
/// not being used to initialize a reference.
ParsedType actOnLambdaInitCaptureInitialization(
SourceLocation Loc, bool ByRef, SourceLocation EllipsisLoc,
IdentifierInfo *Id, LambdaCaptureInitKind InitKind, Expr *&Init) {
return ParsedType::make(buildLambdaInitCaptureInitialization(
Loc, ByRef, EllipsisLoc, None, Id,
InitKind != LambdaCaptureInitKind::CopyInit, Init));
}
QualType buildLambdaInitCaptureInitialization(
SourceLocation Loc, bool ByRef, SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions, IdentifierInfo *Id, bool DirectInit,
Expr *&Init);
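// For illustration, the init-captures handled above include:
//   auto l = [x = 42, &r = obj] { return x; };
// For 'x = 42' the capture type is deduced from the initializer (int here),
// with an lvalue-to-rvalue conversion applied since the capture is not a
// reference; '&r = obj' binds a reference and needs no such conversion.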
/// Create a dummy variable within the declcontext of the lambda's
/// call operator, for name lookup purposes for a lambda init capture.
///
/// CodeGen handles emission of lambda captures, ignoring these dummy
/// variables appropriately.
VarDecl *createLambdaInitCaptureVarDecl(SourceLocation Loc,
QualType InitCaptureType,
SourceLocation EllipsisLoc,
IdentifierInfo *Id,
unsigned InitStyle, Expr *Init);
/// Add an init-capture to a lambda scope.
void addInitCapture(sema::LambdaScopeInfo *LSI, VarDecl *Var);
/// Note that we have finished the explicit captures for the
/// given lambda.
void finishLambdaExplicitCaptures(sema::LambdaScopeInfo *LSI);
/// \brief This is called after parsing the explicit template parameter list
/// on a lambda (if it exists) in C++2a.
void ActOnLambdaExplicitTemplateParameterList(SourceLocation LAngleLoc,
ArrayRef<NamedDecl *> TParams,
SourceLocation RAngleLoc,
ExprResult RequiresClause);
/// Introduce the lambda parameters into scope.
void addLambdaParameters(
ArrayRef<LambdaIntroducer::LambdaCapture> Captures,
CXXMethodDecl *CallOperator, Scope *CurScope);
/// Deduce a block or lambda's return type based on the return
/// statements present in the body.
void deduceClosureReturnType(sema::CapturingScopeInfo &CSI);
/// ActOnStartOfLambdaDefinition - This is called just before we start
/// parsing the body of a lambda; it analyzes the explicit captures and
/// arguments, and sets up various data-structures for the body of the
/// lambda.
void ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro,
Declarator &ParamInfo, Scope *CurScope);
/// ActOnLambdaError - If there is an error parsing a lambda, this callback
/// is invoked to pop the information about the lambda.
void ActOnLambdaError(SourceLocation StartLoc, Scope *CurScope,
bool IsInstantiation = false);
/// ActOnLambdaExpr - This is called when the body of a lambda expression
/// was successfully completed.
ExprResult ActOnLambdaExpr(SourceLocation StartLoc, Stmt *Body,
Scope *CurScope);
/// Does copying/destroying the captured variable have side effects?
bool CaptureHasSideEffects(const sema::Capture &From);
/// Diagnose if an explicit lambda capture is unused. Returns true if a
/// diagnostic is emitted.
bool DiagnoseUnusedLambdaCapture(SourceRange CaptureRange,
const sema::Capture &From);
/// Build a FieldDecl suitable to hold the given capture.
FieldDecl *BuildCaptureField(RecordDecl *RD, const sema::Capture &Capture);
/// Initialize the given capture with a suitable expression.
ExprResult BuildCaptureInit(const sema::Capture &Capture,
SourceLocation ImplicitCaptureLoc,
bool IsOpenMPMapping = false);
/// Complete a lambda-expression having processed and attached the
/// lambda body.
ExprResult BuildLambdaExpr(SourceLocation StartLoc, SourceLocation EndLoc,
sema::LambdaScopeInfo *LSI);
/// Get the return type to use for a lambda's conversion function(s) to
/// function pointer type, given the type of the call operator.
QualType
getLambdaConversionFunctionResultType(const FunctionProtoType *CallOpType,
CallingConv CC);
/// Define the "body" of the conversion from a lambda object to a
/// function pointer.
///
/// This routine doesn't actually define a sensible body; rather, it fills
/// in the initialization expression needed to copy the lambda object into
/// the block, and IR generation actually generates the real body of the
/// block pointer conversion.
void DefineImplicitLambdaToFunctionPointerConversion(
SourceLocation CurrentLoc, CXXConversionDecl *Conv);
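// For illustration, the conversion defined above is what makes a captureless
// lambda usable as a plain function pointer:
//   int (*fp)(int) = [](int x) { return x + 1; };
// getLambdaConversionFunctionResultType derives the 'int (*)(int)' result
// type from the call operator's prototype.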
/// Define the "body" of the conversion from a lambda object to a
/// block pointer.
///
/// This routine doesn't actually define a sensible body; rather, it fills
/// in the initialization expression needed to copy the lambda object into
/// the block, and IR generation actually generates the real body of the
/// block pointer conversion.
void DefineImplicitLambdaToBlockPointerConversion(SourceLocation CurrentLoc,
CXXConversionDecl *Conv);
ExprResult BuildBlockForLambdaConversion(SourceLocation CurrentLocation,
SourceLocation ConvLocation,
CXXConversionDecl *Conv,
Expr *Src);
/// Check whether the given expression is a valid constraint expression.
/// If it is not, a diagnostic is emitted, false is returned, and
/// PossibleNonPrimary will be set to true if the failure might be due to a
/// non-primary expression being used as an atomic constraint.
bool CheckConstraintExpression(const Expr *CE, Token NextToken = Token(),
bool *PossibleNonPrimary = nullptr,
bool IsTrailingRequiresClause = false);
private:
/// Caches pairs of template-like decls whose associated constraints were
/// checked for subsumption and whether or not the first's constraints did in
/// fact subsume the second's.
llvm::DenseMap<std::pair<NamedDecl *, NamedDecl *>, bool> SubsumptionCache;
/// Caches the normalized associated constraints of declarations (concepts or
/// constrained declarations). If an error occurred while normalizing the
/// associated constraints of the template or concept, nullptr will be cached
/// here.
llvm::DenseMap<NamedDecl *, NormalizedConstraint *>
NormalizationCache;
llvm::ContextualFoldingSet<ConstraintSatisfaction, const ASTContext &>
SatisfactionCache;
public:
const NormalizedConstraint *
getNormalizedAssociatedConstraints(
NamedDecl *ConstrainedDecl, ArrayRef<const Expr *> AssociatedConstraints);
/// \brief Check whether the given declaration's associated constraints are
/// at least as constrained as another declaration's, according to the
/// partial ordering of constraints.
///
/// \param Result If no error occurred, receives true if D1 is at least as
/// constrained as D2, and false otherwise.
///
/// \returns true if an error occurred, false otherwise.
bool IsAtLeastAsConstrained(NamedDecl *D1, ArrayRef<const Expr *> AC1,
NamedDecl *D2, ArrayRef<const Expr *> AC2,
bool &Result);
/// Emit a diagnostic if D1 was not at least as constrained as D2 but would
/// have been, had a pair of the atomic constraints involved been declared in
/// a concept rather than repeated in two separate places in code.
/// \returns true if such a diagnostic was emitted, false otherwise.
bool MaybeEmitAmbiguousAtomicConstraintsDiagnostic(NamedDecl *D1,
ArrayRef<const Expr *> AC1, NamedDecl *D2, ArrayRef<const Expr *> AC2);
/// \brief Check whether the given list of constraint expressions is
/// satisfied (as if in a 'conjunction') given template arguments.
/// \param Template the template-like entity that triggered the constraints
/// check (either a concept or a constrained entity).
/// \param ConstraintExprs a list of constraint expressions, treated as if
/// they were 'AND'ed together.
/// \param TemplateArgs the list of template arguments to substitute into the
/// constraint expression.
/// \param TemplateIDRange The source range of the template id that
/// caused the constraints check.
/// \param Satisfaction if true is returned, will contain details of the
/// satisfaction, with enough information to diagnose an unsatisfied
/// expression.
/// \returns true if an error occurred and satisfaction could not be checked,
/// false otherwise.
bool CheckConstraintSatisfaction(
const NamedDecl *Template, ArrayRef<const Expr *> ConstraintExprs,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange TemplateIDRange, ConstraintSatisfaction &Satisfaction);
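// For illustration, given
//   template <class T> concept Small = sizeof(T) <= 4;
//   template <Small T> void f(T);
// a use of f<double> checks the concept's constraint expression with
// TemplateArgs == {double}; on a platform with an 8-byte double, Satisfaction
// records the failed atomic constraint so it can be diagnosed.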
/// \brief Check whether the given non-dependent constraint expression is
/// satisfied. Returns false and updates Satisfaction with the satisfaction
/// verdict if successful; emits a diagnostic and returns true if an error
/// occurred and satisfaction could not be determined.
///
/// \returns true if an error occurred, false otherwise.
bool CheckConstraintSatisfaction(const Expr *ConstraintExpr,
ConstraintSatisfaction &Satisfaction);
/// Check whether the given function decl's trailing requires clause is
/// satisfied, if any. Returns false and updates Satisfaction with the
/// satisfaction verdict if successful; emits a diagnostic and returns true if
/// an error occurred and satisfaction could not be determined.
///
/// \returns true if an error occurred, false otherwise.
bool CheckFunctionConstraints(const FunctionDecl *FD,
ConstraintSatisfaction &Satisfaction,
SourceLocation UsageLoc = SourceLocation());
/// \brief Ensure that the given template arguments satisfy the constraints
/// associated with the given template, emitting a diagnostic if they do not.
///
/// \param Template The template to which the template arguments are being
/// provided.
///
/// \param TemplateArgs The converted, canonicalized template arguments.
///
/// \param TemplateIDRange The source range of the template id that
/// caused the constraints check.
///
/// \returns true if the constraints are not satisfied or could not be checked
/// for satisfaction, false if the constraints are satisfied.
bool EnsureTemplateArgumentListConstraints(TemplateDecl *Template,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange TemplateIDRange);
/// \brief Emit diagnostics explaining why a constraint expression was deemed
/// unsatisfied.
/// \param First whether this is the first time an unsatisfied constraint is
/// diagnosed for this error.
void
DiagnoseUnsatisfiedConstraint(const ConstraintSatisfaction &Satisfaction,
bool First = true);
/// \brief Emit diagnostics explaining why a constraint expression was deemed
/// unsatisfied.
void
DiagnoseUnsatisfiedConstraint(const ASTConstraintSatisfaction &Satisfaction,
bool First = true);
// ParseObjCStringLiteral - Parse Objective-C string literals.
ExprResult ParseObjCStringLiteral(SourceLocation *AtLocs,
ArrayRef<Expr *> Strings);
ExprResult BuildObjCStringLiteral(SourceLocation AtLoc, StringLiteral *S);
/// BuildObjCNumericLiteral - builds an ObjCBoxedExpr AST node for the
/// numeric literal expression. Type of the expression will be "NSNumber *"
/// or "id" if NSNumber is unavailable.
ExprResult BuildObjCNumericLiteral(SourceLocation AtLoc, Expr *Number);
ExprResult ActOnObjCBoolLiteral(SourceLocation AtLoc, SourceLocation ValueLoc,
bool Value);
ExprResult BuildObjCArrayLiteral(SourceRange SR, MultiExprArg Elements);
/// BuildObjCBoxedExpr - builds an ObjCBoxedExpr AST node for the
/// '@' prefixed parenthesized expression. The type of the expression will
/// either be "NSNumber *", "NSString *" or "NSValue *" depending on the type
/// of ValueType, which is allowed to be a built-in numeric type, "char *",
/// "const char *", or a C structure with attribute 'objc_boxable'.
ExprResult BuildObjCBoxedExpr(SourceRange SR, Expr *ValueExpr);
ExprResult BuildObjCSubscriptExpression(SourceLocation RB, Expr *BaseExpr,
Expr *IndexExpr,
ObjCMethodDecl *getterMethod,
ObjCMethodDecl *setterMethod);
ExprResult BuildObjCDictionaryLiteral(SourceRange SR,
MutableArrayRef<ObjCDictionaryElement> Elements);
ExprResult BuildObjCEncodeExpression(SourceLocation AtLoc,
TypeSourceInfo *EncodedTypeInfo,
SourceLocation RParenLoc);
ExprResult BuildCXXMemberCallExpr(Expr *Exp, NamedDecl *FoundDecl,
CXXConversionDecl *Method,
bool HadMultipleCandidates);
ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc,
SourceLocation EncodeLoc,
SourceLocation LParenLoc,
ParsedType Ty,
SourceLocation RParenLoc);
/// ParseObjCSelectorExpression - Build selector expression for \@selector
ExprResult ParseObjCSelectorExpression(Selector Sel,
SourceLocation AtLoc,
SourceLocation SelLoc,
SourceLocation LParenLoc,
SourceLocation RParenLoc,
bool WarnMultipleSelectors);
/// ParseObjCProtocolExpression - Build protocol expression for \@protocol
ExprResult ParseObjCProtocolExpression(IdentifierInfo * ProtocolName,
SourceLocation AtLoc,
SourceLocation ProtoLoc,
SourceLocation LParenLoc,
SourceLocation ProtoIdLoc,
SourceLocation RParenLoc);
//===--------------------------------------------------------------------===//
// C++ Declarations
//
Decl *ActOnStartLinkageSpecification(Scope *S,
SourceLocation ExternLoc,
Expr *LangStr,
SourceLocation LBraceLoc);
Decl *ActOnFinishLinkageSpecification(Scope *S,
Decl *LinkageSpec,
SourceLocation RBraceLoc);
//===--------------------------------------------------------------------===//
// C++ Classes
//
CXXRecordDecl *getCurrentClass(Scope *S, const CXXScopeSpec *SS);
bool isCurrentClassName(const IdentifierInfo &II, Scope *S,
const CXXScopeSpec *SS = nullptr);
bool isCurrentClassNameTypo(IdentifierInfo *&II, const CXXScopeSpec *SS);
bool ActOnAccessSpecifier(AccessSpecifier Access, SourceLocation ASLoc,
SourceLocation ColonLoc,
const ParsedAttributesView &Attrs);
NamedDecl *ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS,
Declarator &D,
MultiTemplateParamsArg TemplateParameterLists,
Expr *BitfieldWidth, const VirtSpecifiers &VS,
InClassInitStyle InitStyle);
void ActOnStartCXXInClassMemberInitializer();
void ActOnFinishCXXInClassMemberInitializer(Decl *VarDecl,
SourceLocation EqualLoc,
Expr *Init);
MemInitResult ActOnMemInitializer(Decl *ConstructorD,
Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
const DeclSpec &DS,
SourceLocation IdLoc,
SourceLocation LParenLoc,
ArrayRef<Expr *> Args,
SourceLocation RParenLoc,
SourceLocation EllipsisLoc);
MemInitResult ActOnMemInitializer(Decl *ConstructorD,
Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
const DeclSpec &DS,
SourceLocation IdLoc,
Expr *InitList,
SourceLocation EllipsisLoc);
MemInitResult BuildMemInitializer(Decl *ConstructorD,
Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
const DeclSpec &DS,
SourceLocation IdLoc,
Expr *Init,
SourceLocation EllipsisLoc);
MemInitResult BuildMemberInitializer(ValueDecl *Member,
Expr *Init,
SourceLocation IdLoc);
MemInitResult BuildBaseInitializer(QualType BaseType,
TypeSourceInfo *BaseTInfo,
Expr *Init,
CXXRecordDecl *ClassDecl,
SourceLocation EllipsisLoc);
MemInitResult BuildDelegatingInitializer(TypeSourceInfo *TInfo,
Expr *Init,
CXXRecordDecl *ClassDecl);
bool SetDelegatingInitializer(CXXConstructorDecl *Constructor,
CXXCtorInitializer *Initializer);
bool SetCtorInitializers(CXXConstructorDecl *Constructor, bool AnyErrors,
ArrayRef<CXXCtorInitializer *> Initializers = None);
void SetIvarInitializers(ObjCImplementationDecl *ObjCImplementation);
/// MarkBaseAndMemberDestructorsReferenced - Given a record decl,
/// mark all the non-trivial destructors of its members and bases as
/// referenced.
void MarkBaseAndMemberDestructorsReferenced(SourceLocation Loc,
CXXRecordDecl *Record);
/// Mark destructors of virtual bases of this class referenced. In the Itanium
/// C++ ABI, this is done when emitting a destructor for any non-abstract
/// class. In the Microsoft C++ ABI, this is done any time a class's
/// destructor is referenced.
void MarkVirtualBaseDestructorsReferenced(
SourceLocation Location, CXXRecordDecl *ClassDecl,
llvm::SmallPtrSetImpl<const RecordType *> *DirectVirtualBases = nullptr);
/// Do semantic checks to allow the complete destructor variant to be emitted
/// when the destructor is defined in another translation unit. In the Itanium
/// C++ ABI, destructor variants are emitted together. In the MS C++ ABI, they
/// can be emitted in separate TUs. To emit the complete variant, run a subset
/// of the checks performed when emitting a regular destructor.
void CheckCompleteDestructorVariant(SourceLocation CurrentLocation,
CXXDestructorDecl *Dtor);
/// The list of classes whose vtables have been used within
/// this translation unit, and the source locations at which the
/// first use occurred.
typedef std::pair<CXXRecordDecl*, SourceLocation> VTableUse;
/// The list of vtables that are required but have not yet been
/// materialized.
SmallVector<VTableUse, 16> VTableUses;
/// The set of classes whose vtables have been used within
/// this translation unit, and a bit that will be true if the vtable is
/// required to be emitted (otherwise, it should be emitted only if needed
/// by code generation).
llvm::DenseMap<CXXRecordDecl *, bool> VTablesUsed;
/// Load any externally-stored vtable uses.
void LoadExternalVTableUses();
/// Note that the vtable for the given class was used at the
/// given location.
void MarkVTableUsed(SourceLocation Loc, CXXRecordDecl *Class,
bool DefinitionRequired = false);
/// Mark the exception specifications of all virtual member functions
/// in the given class as needed.
void MarkVirtualMemberExceptionSpecsNeeded(SourceLocation Loc,
const CXXRecordDecl *RD);
/// MarkVirtualMembersReferenced - Will mark all members of the given
/// CXXRecordDecl referenced.
void MarkVirtualMembersReferenced(SourceLocation Loc, const CXXRecordDecl *RD,
bool ConstexprOnly = false);
/// Define all of the vtables that have been used in this
/// translation unit and reference any virtual members used by those
/// vtables.
///
/// \returns true if any work was done, false otherwise.
bool DefineUsedVTables();
void AddImplicitlyDeclaredMembersToClass(CXXRecordDecl *ClassDecl);
void ActOnMemInitializers(Decl *ConstructorDecl,
SourceLocation ColonLoc,
ArrayRef<CXXCtorInitializer*> MemInits,
bool AnyErrors);
/// Check class-level dllimport/dllexport attribute. The caller must
/// ensure that referenceDLLExportedClassMethods is called at some point later
/// when all outer classes of Class are complete.
void checkClassLevelDLLAttribute(CXXRecordDecl *Class);
void checkClassLevelCodeSegAttribute(CXXRecordDecl *Class);
void referenceDLLExportedClassMethods();
void propagateDLLAttrToBaseClassTemplate(
CXXRecordDecl *Class, Attr *ClassAttr,
ClassTemplateSpecializationDecl *BaseTemplateSpec,
SourceLocation BaseLoc);
/// Add gsl::Pointer attribute to std::container::iterator
/// \param ND The declaration that introduces the name
/// std::container::iterator.
/// \param UnderlyingRecord The record named by ND.
void inferGslPointerAttribute(NamedDecl *ND, CXXRecordDecl *UnderlyingRecord);
/// Add [[gsl::Owner]] and [[gsl::Pointer]] attributes for std:: types.
void inferGslOwnerPointerAttribute(CXXRecordDecl *Record);
/// Add [[gsl::Pointer]] attributes for std:: types.
void inferGslPointerAttribute(TypedefNameDecl *TD);
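// Illustrative only (not from the original header): the attributes these
// helpers infer for standard-library-like types, written out explicitly here.
//
// \code
//   class [[gsl::Owner(int)]] IntVec { /* owns its elements */ };
//   class [[gsl::Pointer(int)]] IntIter { /* points into an IntVec */ };
// \endcode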
void CheckCompletedCXXClass(Scope *S, CXXRecordDecl *Record);
/// Check that the C++ class annotated with "trivial_abi" satisfies all the
/// conditions that are needed for the attribute to have an effect.
void checkIllFormedTrivialABIStruct(CXXRecordDecl &RD);
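// Illustrative only: a class carrying the Clang 'trivial_abi' attribute that
// checkIllFormedTrivialABIStruct validates. With the attribute, the type may
// be passed in registers even though its special members are non-trivial.
//
// \code
//   struct [[clang::trivial_abi]] FileHandle {
//     FileHandle(const FileHandle &);
//     ~FileHandle();
//     int fd;
//   };
// \endcode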
void ActOnFinishCXXMemberSpecification(Scope *S, SourceLocation RLoc,
Decl *TagDecl, SourceLocation LBrac,
SourceLocation RBrac,
const ParsedAttributesView &AttrList);
void ActOnFinishCXXMemberDecls();
void ActOnFinishCXXNonNestedClass();
void ActOnReenterCXXMethodParameter(Scope *S, ParmVarDecl *Param);
unsigned ActOnReenterTemplateScope(Decl *Template,
llvm::function_ref<Scope *()> EnterScope);
void ActOnStartDelayedMemberDeclarations(Scope *S, Decl *Record);
void ActOnStartDelayedCXXMethodDeclaration(Scope *S, Decl *Method);
void ActOnDelayedCXXMethodParameter(Scope *S, Decl *Param);
void ActOnFinishDelayedMemberDeclarations(Scope *S, Decl *Record);
void ActOnFinishDelayedCXXMethodDeclaration(Scope *S, Decl *Method);
void ActOnFinishDelayedMemberInitializers(Decl *Record);
void MarkAsLateParsedTemplate(FunctionDecl *FD, Decl *FnD,
CachedTokens &Toks);
void UnmarkAsLateParsedTemplate(FunctionDecl *FD);
bool IsInsideALocalClassWithinATemplateFunction();
Decl *ActOnStaticAssertDeclaration(SourceLocation StaticAssertLoc,
Expr *AssertExpr,
Expr *AssertMessageExpr,
SourceLocation RParenLoc);
Decl *BuildStaticAssertDeclaration(SourceLocation StaticAssertLoc,
Expr *AssertExpr,
StringLiteral *AssertMessageExpr,
SourceLocation RParenLoc,
bool Failed);
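// Illustrative only: the declaration form the two functions above act on and
// build.
//
// \code
//   static_assert(sizeof(void *) == 8, "this TU assumes a 64-bit target");
// \endcode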
FriendDecl *CheckFriendTypeDecl(SourceLocation LocStart,
SourceLocation FriendLoc,
TypeSourceInfo *TSInfo);
Decl *ActOnFriendTypeDecl(Scope *S, const DeclSpec &DS,
MultiTemplateParamsArg TemplateParams);
NamedDecl *ActOnFriendFunctionDecl(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParams);
QualType CheckConstructorDeclarator(Declarator &D, QualType R,
StorageClass& SC);
void CheckConstructor(CXXConstructorDecl *Constructor);
QualType CheckDestructorDeclarator(Declarator &D, QualType R,
StorageClass& SC);
bool CheckDestructor(CXXDestructorDecl *Destructor);
void CheckConversionDeclarator(Declarator &D, QualType &R,
StorageClass& SC);
Decl *ActOnConversionDeclarator(CXXConversionDecl *Conversion);
void CheckDeductionGuideDeclarator(Declarator &D, QualType &R,
StorageClass &SC);
void CheckDeductionGuideTemplate(FunctionTemplateDecl *TD);
void CheckExplicitlyDefaultedFunction(Scope *S, FunctionDecl *MD);
bool CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD,
CXXSpecialMember CSM);
void CheckDelayedMemberExceptionSpecs();
bool CheckExplicitlyDefaultedComparison(Scope *S, FunctionDecl *MD,
DefaultedComparisonKind DCK);
void DeclareImplicitEqualityComparison(CXXRecordDecl *RD,
FunctionDecl *Spaceship);
void DefineDefaultedComparison(SourceLocation Loc, FunctionDecl *FD,
DefaultedComparisonKind DCK);
//===--------------------------------------------------------------------===//
// C++ Derived Classes
//
/// CheckBaseSpecifier - Check a parsed base specifier and build its
/// semantic representation.
CXXBaseSpecifier *CheckBaseSpecifier(CXXRecordDecl *Class,
SourceRange SpecifierRange,
bool Virtual, AccessSpecifier Access,
TypeSourceInfo *TInfo,
SourceLocation EllipsisLoc);
BaseResult ActOnBaseSpecifier(Decl *classdecl,
SourceRange SpecifierRange,
ParsedAttributes &Attrs,
bool Virtual, AccessSpecifier Access,
ParsedType basetype,
SourceLocation BaseLoc,
SourceLocation EllipsisLoc);
bool AttachBaseSpecifiers(CXXRecordDecl *Class,
MutableArrayRef<CXXBaseSpecifier *> Bases);
void ActOnBaseSpecifiers(Decl *ClassDecl,
MutableArrayRef<CXXBaseSpecifier *> Bases);
bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base);
bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base,
CXXBasePaths &Paths);
// FIXME: I don't like this name.
void BuildBasePathArray(const CXXBasePaths &Paths, CXXCastPath &BasePath);
bool CheckDerivedToBaseConversion(QualType Derived, QualType Base,
SourceLocation Loc, SourceRange Range,
CXXCastPath *BasePath = nullptr,
bool IgnoreAccess = false);
bool CheckDerivedToBaseConversion(QualType Derived, QualType Base,
unsigned InaccessibleBaseID,
unsigned AmbiguousBaseConvID,
SourceLocation Loc, SourceRange Range,
DeclarationName Name,
CXXCastPath *BasePath,
bool IgnoreAccess = false);
std::string getAmbiguousPathsDisplayString(CXXBasePaths &Paths);
bool CheckOverridingFunctionAttributes(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
/// CheckOverridingFunctionReturnType - Checks whether the return types are
/// covariant, according to C++ [class.virtual]p5.
bool CheckOverridingFunctionReturnType(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
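// Illustrative only: a covariant return type accepted by
// CheckOverridingFunctionReturnType under C++ [class.virtual]p5.
//
// \code
//   struct Base { virtual Base *clone() const; };
//   struct Derived : Base {
//     Derived *clone() const override; // covariant return type: OK
//   };
// \endcode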
/// CheckOverridingFunctionExceptionSpec - Checks whether the exception
/// spec is a subset of base spec.
bool CheckOverridingFunctionExceptionSpec(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
bool CheckPureMethod(CXXMethodDecl *Method, SourceRange InitRange);
/// CheckOverrideControl - Check C++11 override control semantics.
void CheckOverrideControl(NamedDecl *D);
/// DiagnoseAbsenceOfOverrideControl - Diagnose if 'override' keyword was
/// not used in the declaration of an overriding method.
void DiagnoseAbsenceOfOverrideControl(NamedDecl *D, bool Inconsistent);
/// CheckForFunctionMarkedFinal - Checks whether a virtual member function
/// overrides a virtual member function marked 'final', according to
/// C++11 [class.virtual]p4.
bool CheckIfOverriddenFunctionIsMarkedFinal(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
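// Illustrative only: the C++11 [class.virtual]p4 violation that
// CheckIfOverriddenFunctionIsMarkedFinal diagnoses.
//
// \code
//   struct B { virtual void f() final; };
//   struct D : B {
//     void f() override; // error: overrides a function marked 'final'
//   };
// \endcode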
//===--------------------------------------------------------------------===//
// C++ Access Control
//
enum AccessResult {
AR_accessible,
AR_inaccessible,
AR_dependent,
AR_delayed
};
bool SetMemberAccessSpecifier(NamedDecl *MemberDecl,
NamedDecl *PrevMemberDecl,
AccessSpecifier LexicalAS);
AccessResult CheckUnresolvedMemberAccess(UnresolvedMemberExpr *E,
DeclAccessPair FoundDecl);
AccessResult CheckUnresolvedLookupAccess(UnresolvedLookupExpr *E,
DeclAccessPair FoundDecl);
AccessResult CheckAllocationAccess(SourceLocation OperatorLoc,
SourceRange PlacementRange,
CXXRecordDecl *NamingClass,
DeclAccessPair FoundDecl,
bool Diagnose = true);
AccessResult CheckConstructorAccess(SourceLocation Loc,
CXXConstructorDecl *D,
DeclAccessPair FoundDecl,
const InitializedEntity &Entity,
bool IsCopyBindingRefToTemp = false);
AccessResult CheckConstructorAccess(SourceLocation Loc,
CXXConstructorDecl *D,
DeclAccessPair FoundDecl,
const InitializedEntity &Entity,
const PartialDiagnostic &PDiag);
AccessResult CheckDestructorAccess(SourceLocation Loc,
CXXDestructorDecl *Dtor,
const PartialDiagnostic &PDiag,
QualType objectType = QualType());
AccessResult CheckFriendAccess(NamedDecl *D);
AccessResult CheckMemberAccess(SourceLocation UseLoc,
CXXRecordDecl *NamingClass,
DeclAccessPair Found);
AccessResult
CheckStructuredBindingMemberAccess(SourceLocation UseLoc,
CXXRecordDecl *DecomposedClass,
DeclAccessPair Field);
AccessResult CheckMemberOperatorAccess(SourceLocation Loc,
Expr *ObjectExpr,
Expr *ArgExpr,
DeclAccessPair FoundDecl);
AccessResult CheckAddressOfMemberAccess(Expr *OvlExpr,
DeclAccessPair FoundDecl);
AccessResult CheckBaseClassAccess(SourceLocation AccessLoc,
QualType Base, QualType Derived,
const CXXBasePath &Path,
unsigned DiagID,
bool ForceCheck = false,
bool ForceUnprivileged = false);
void CheckLookupAccess(const LookupResult &R);
bool IsSimplyAccessible(NamedDecl *Decl, CXXRecordDecl *NamingClass,
QualType BaseType);
bool isMemberAccessibleForDeletion(CXXRecordDecl *NamingClass,
DeclAccessPair Found, QualType ObjectType,
SourceLocation Loc,
const PartialDiagnostic &Diag);
bool isMemberAccessibleForDeletion(CXXRecordDecl *NamingClass,
DeclAccessPair Found,
QualType ObjectType) {
return isMemberAccessibleForDeletion(NamingClass, Found, ObjectType,
SourceLocation(), PDiag());
}
void HandleDependentAccessCheck(const DependentDiagnostic &DD,
const MultiLevelTemplateArgumentList &TemplateArgs);
void PerformDependentDiagnostics(const DeclContext *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs);
void HandleDelayedAccessCheck(sema::DelayedDiagnostic &DD, Decl *Ctx);
/// When true, access checking violations are treated as SFINAE
/// failures rather than hard errors.
bool AccessCheckingSFINAE;
enum AbstractDiagSelID {
AbstractNone = -1,
AbstractReturnType,
AbstractParamType,
AbstractVariableType,
AbstractFieldType,
AbstractIvarType,
AbstractSynthesizedIvarType,
AbstractArrayType
};
bool isAbstractType(SourceLocation Loc, QualType T);
bool RequireNonAbstractType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser);
template <typename... Ts>
bool RequireNonAbstractType(SourceLocation Loc, QualType T, unsigned DiagID,
const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireNonAbstractType(Loc, T, Diagnoser);
}
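// Illustrative caller sketch (not from the original header), assuming a
// suitable diagnostic ID such as diag::err_abstract_type_in_decl; the
// variadic overload above merely binds DiagID and Args into a
// BoundTypeDiagnoser and defers to the TypeDiagnoser overload.
//
// \code
//   if (RequireNonAbstractType(Loc, FieldType,
//                              diag::err_abstract_type_in_decl,
//                              AbstractFieldType))
//     return; // the type was abstract; a diagnostic has been emitted
// \endcode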
void DiagnoseAbstractType(const CXXRecordDecl *RD);
//===--------------------------------------------------------------------===//
// C++ Overloaded Operators [C++ 13.5]
//
bool CheckOverloadedOperatorDeclaration(FunctionDecl *FnDecl);
bool CheckLiteralOperatorDeclaration(FunctionDecl *FnDecl);
//===--------------------------------------------------------------------===//
// C++ Templates [C++ 14]
//
void FilterAcceptableTemplateNames(LookupResult &R,
bool AllowFunctionTemplates = true,
bool AllowDependent = true);
bool hasAnyAcceptableTemplateNames(LookupResult &R,
bool AllowFunctionTemplates = true,
bool AllowDependent = true,
bool AllowNonTemplateFunctions = false);
/// Try to interpret the lookup result D as a template-name.
///
/// \param D A declaration found by name lookup.
/// \param AllowFunctionTemplates Whether function templates should be
/// considered valid results.
/// \param AllowDependent Whether unresolved using declarations (that might
/// name templates) should be considered valid results.
static NamedDecl *getAsTemplateNameDecl(NamedDecl *D,
bool AllowFunctionTemplates = true,
bool AllowDependent = true);
enum TemplateNameIsRequiredTag { TemplateNameIsRequired };
/// Whether and why a template name is required in this lookup.
class RequiredTemplateKind {
public:
/// Template name is required if TemplateKWLoc is valid.
RequiredTemplateKind(SourceLocation TemplateKWLoc = SourceLocation())
: TemplateKW(TemplateKWLoc) {}
/// Template name is unconditionally required.
RequiredTemplateKind(TemplateNameIsRequiredTag) : TemplateKW() {}
SourceLocation getTemplateKeywordLoc() const {
return TemplateKW.getValueOr(SourceLocation());
}
bool hasTemplateKeyword() const { return getTemplateKeywordLoc().isValid(); }
bool isRequired() const { return TemplateKW != SourceLocation(); }
explicit operator bool() const { return isRequired(); }
private:
llvm::Optional<SourceLocation> TemplateKW;
};
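// Illustrative only: the three states of RequiredTemplateKind, built from the
// members declared above ('TemplateKWLoc' is a hypothetical SourceLocation).
//
// \code
//   RequiredTemplateKind NotRequired;          // invalid loc: not required
//   RequiredTemplateKind IfKW(TemplateKWLoc);  // required iff the loc is valid
//   RequiredTemplateKind Always(TemplateNameIsRequired); // always required
//   if (IfKW.isRequired()) { /* a template name must follow */ }
// \endcode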
enum class AssumedTemplateKind {
/// This is not assumed to be a template name.
None,
/// This is assumed to be a template name because lookup found nothing.
FoundNothing,
/// This is assumed to be a template name because lookup found one or more
/// functions (but no function templates).
FoundFunctions,
};
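// Illustrative only: the C++20 rule the enumerators above model. In the
// example, unqualified lookup of 'f' finds nothing (FoundNothing), so 'f' is
// assumed to name a template and is then found by ADL at the call;
// FoundFunctions covers the analogous case where lookup finds only
// non-template functions.
//
// \code
//   namespace N {
//     struct A {};
//     template <int> int f(A) { return 0; }
//   }
//   int x = f<0>(N::A{}); // OK in C++20: assumed template-name, found by ADL
// \endcode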
bool LookupTemplateName(
LookupResult &R, Scope *S, CXXScopeSpec &SS, QualType ObjectType,
bool EnteringContext, bool &MemberOfUnknownSpecialization,
RequiredTemplateKind RequiredTemplate = SourceLocation(),
AssumedTemplateKind *ATK = nullptr, bool AllowTypoCorrection = true);
TemplateNameKind isTemplateName(Scope *S,
CXXScopeSpec &SS,
bool hasTemplateKeyword,
const UnqualifiedId &Name,
ParsedType ObjectType,
bool EnteringContext,
TemplateTy &Template,
bool &MemberOfUnknownSpecialization,
bool Disambiguation = false);
/// Try to resolve an undeclared template name as a type template.
///
/// Sets II to the identifier corresponding to the template name, and updates
/// Name to a corresponding (typo-corrected) type template name and TNK to
/// the corresponding kind, if possible.
void ActOnUndeclaredTypeTemplateName(Scope *S, TemplateTy &Name,
TemplateNameKind &TNK,
SourceLocation NameLoc,
IdentifierInfo *&II);
bool resolveAssumedTemplateNameAsType(Scope *S, TemplateName &Name,
SourceLocation NameLoc,
bool Diagnose = true);
/// Determine whether a particular identifier might be the name in a C++1z
/// deduction-guide declaration.
bool isDeductionGuideName(Scope *S, const IdentifierInfo &Name,
SourceLocation NameLoc,
ParsedTemplateTy *Template = nullptr);
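// Illustrative only: a user-written deduction guide; 'Box' in the guide below
// is the kind of name isDeductionGuideName recognizes (assumes <string>).
//
// \code
//   template <typename T> struct Box { Box(T); };
//   Box(const char *) -> Box<std::string>; // deduction guide for 'Box'
// \endcode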
bool DiagnoseUnknownTemplateName(const IdentifierInfo &II,
SourceLocation IILoc,
Scope *S,
const CXXScopeSpec *SS,
TemplateTy &SuggestedTemplate,
TemplateNameKind &SuggestedKind);
bool DiagnoseUninstantiableTemplate(SourceLocation PointOfInstantiation,
NamedDecl *Instantiation,
bool InstantiatedFromMember,
const NamedDecl *Pattern,
const NamedDecl *PatternDef,
TemplateSpecializationKind TSK,
bool Complain = true);
void DiagnoseTemplateParameterShadow(SourceLocation Loc, Decl *PrevDecl);
TemplateDecl *AdjustDeclIfTemplate(Decl *&Decl);
NamedDecl *ActOnTypeParameter(Scope *S, bool Typename,
SourceLocation EllipsisLoc,
SourceLocation KeyLoc,
IdentifierInfo *ParamName,
SourceLocation ParamNameLoc,
unsigned Depth, unsigned Position,
SourceLocation EqualLoc,
ParsedType DefaultArg, bool HasTypeConstraint);
bool ActOnTypeConstraint(const CXXScopeSpec &SS,
TemplateIdAnnotation *TypeConstraint,
TemplateTypeParmDecl *ConstrainedParameter,
SourceLocation EllipsisLoc);
bool BuildTypeConstraint(const CXXScopeSpec &SS,
TemplateIdAnnotation *TypeConstraint,
TemplateTypeParmDecl *ConstrainedParameter,
SourceLocation EllipsisLoc,
bool AllowUnexpandedPack);
bool AttachTypeConstraint(NestedNameSpecifierLoc NS,
DeclarationNameInfo NameInfo,
ConceptDecl *NamedConcept,
const TemplateArgumentListInfo *TemplateArgs,
TemplateTypeParmDecl *ConstrainedParameter,
SourceLocation EllipsisLoc);
bool AttachTypeConstraint(AutoTypeLoc TL,
NonTypeTemplateParmDecl *ConstrainedParameter,
SourceLocation EllipsisLoc);
bool RequireStructuralType(QualType T, SourceLocation Loc);
QualType CheckNonTypeTemplateParameterType(TypeSourceInfo *&TSI,
SourceLocation Loc);
QualType CheckNonTypeTemplateParameterType(QualType T, SourceLocation Loc);
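// Illustrative only: a structural class type usable as the type of a C++20
// non-type template parameter, the property RequireStructuralType checks.
//
// \code
//   struct Point { int x, y; };     // public, structural members: OK
//   template <Point P> struct Holder {};
//   Holder<Point{1, 2}> h;          // C++20 class-type NTTP
// \endcode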
NamedDecl *ActOnNonTypeTemplateParameter(Scope *S, Declarator &D,
unsigned Depth,
unsigned Position,
SourceLocation EqualLoc,
Expr *DefaultArg);
NamedDecl *ActOnTemplateTemplateParameter(Scope *S,
SourceLocation TmpLoc,
TemplateParameterList *Params,
SourceLocation EllipsisLoc,
IdentifierInfo *ParamName,
SourceLocation ParamNameLoc,
unsigned Depth,
unsigned Position,
SourceLocation EqualLoc,
ParsedTemplateArgument DefaultArg);
TemplateParameterList *
ActOnTemplateParameterList(unsigned Depth,
SourceLocation ExportLoc,
SourceLocation TemplateLoc,
SourceLocation LAngleLoc,
ArrayRef<NamedDecl *> Params,
SourceLocation RAngleLoc,
Expr *RequiresClause);
/// The context in which we are checking a template parameter list.
enum TemplateParamListContext {
TPC_ClassTemplate,
TPC_VarTemplate,
TPC_FunctionTemplate,
TPC_ClassTemplateMember,
TPC_FriendClassTemplate,
TPC_FriendFunctionTemplate,
TPC_FriendFunctionTemplateDefinition,
TPC_TypeAliasTemplate
};
bool CheckTemplateParameterList(TemplateParameterList *NewParams,
TemplateParameterList *OldParams,
TemplateParamListContext TPC,
SkipBodyInfo *SkipBody = nullptr);
TemplateParameterList *MatchTemplateParametersToScopeSpecifier(
SourceLocation DeclStartLoc, SourceLocation DeclLoc,
const CXXScopeSpec &SS, TemplateIdAnnotation *TemplateId,
ArrayRef<TemplateParameterList *> ParamLists,
bool IsFriend, bool &IsMemberSpecialization, bool &Invalid,
bool SuppressDiagnostic = false);
DeclResult CheckClassTemplate(
Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc,
const ParsedAttributesView &Attr, TemplateParameterList *TemplateParams,
AccessSpecifier AS, SourceLocation ModulePrivateLoc,
SourceLocation FriendLoc, unsigned NumOuterTemplateParamLists,
TemplateParameterList **OuterTemplateParamLists,
SkipBodyInfo *SkipBody = nullptr);
TemplateArgumentLoc getTrivialTemplateArgumentLoc(const TemplateArgument &Arg,
QualType NTTPType,
SourceLocation Loc);
/// Get a template argument mapping the given template parameter to itself,
/// e.g. for X in \c template<int X>, this would return an expression template
/// argument referencing X.
TemplateArgumentLoc getIdentityTemplateArgumentLoc(NamedDecl *Param,
SourceLocation Location);
void translateTemplateArguments(const ASTTemplateArgsPtr &In,
TemplateArgumentListInfo &Out);
ParsedTemplateArgument ActOnTemplateTypeArgument(TypeResult ParsedType);
void NoteAllFoundTemplates(TemplateName Name);
QualType CheckTemplateIdType(TemplateName Template,
SourceLocation TemplateLoc,
TemplateArgumentListInfo &TemplateArgs);
TypeResult
ActOnTemplateIdType(Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
TemplateTy Template, IdentifierInfo *TemplateII,
SourceLocation TemplateIILoc, SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc,
bool IsCtorOrDtorName = false, bool IsClassName = false);
/// Parsed an elaborated-type-specifier that refers to a template-id,
/// such as \c class T::template apply<U>.
TypeResult ActOnTagTemplateIdType(TagUseKind TUK,
TypeSpecifierType TagSpec,
SourceLocation TagLoc,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
TemplateTy TemplateD,
SourceLocation TemplateLoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgsIn,
SourceLocation RAngleLoc);
DeclResult ActOnVarTemplateSpecialization(
Scope *S, Declarator &D, TypeSourceInfo *DI,
SourceLocation TemplateKWLoc, TemplateParameterList *TemplateParams,
StorageClass SC, bool IsPartialSpecialization);
/// Get the specialization of the given variable template corresponding to
/// the specified argument list, or a null-but-valid result if the arguments
/// are dependent.
DeclResult CheckVarTemplateId(VarTemplateDecl *Template,
SourceLocation TemplateLoc,
SourceLocation TemplateNameLoc,
const TemplateArgumentListInfo &TemplateArgs);
/// Form a reference to the specialization of the given variable template
/// corresponding to the specified argument list, or a null-but-valid result
/// if the arguments are dependent.
ExprResult CheckVarTemplateId(const CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
VarTemplateDecl *Template,
SourceLocation TemplateLoc,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult
CheckConceptTemplateId(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &ConceptNameInfo,
NamedDecl *FoundDecl, ConceptDecl *NamedConcept,
const TemplateArgumentListInfo *TemplateArgs);
void diagnoseMissingTemplateArguments(TemplateName Name, SourceLocation Loc);
ExprResult BuildTemplateIdExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
LookupResult &R,
bool RequiresADL,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult BuildQualifiedTemplateIdExpr(CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
TemplateNameKind ActOnTemplateName(
Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
const UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext,
TemplateTy &Template, bool AllowInjectedClassName = false);
DeclResult ActOnClassTemplateSpecialization(
Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
SourceLocation ModulePrivateLoc, CXXScopeSpec &SS,
TemplateIdAnnotation &TemplateId, const ParsedAttributesView &Attr,
MultiTemplateParamsArg TemplateParameterLists,
SkipBodyInfo *SkipBody = nullptr);
bool CheckTemplatePartialSpecializationArgs(SourceLocation Loc,
TemplateDecl *PrimaryTemplate,
unsigned NumExplicitArgs,
ArrayRef<TemplateArgument> Args);
void CheckTemplatePartialSpecialization(
ClassTemplatePartialSpecializationDecl *Partial);
void CheckTemplatePartialSpecialization(
VarTemplatePartialSpecializationDecl *Partial);
Decl *ActOnTemplateDeclarator(Scope *S,
MultiTemplateParamsArg TemplateParameterLists,
Declarator &D);
bool
CheckSpecializationInstantiationRedecl(SourceLocation NewLoc,
TemplateSpecializationKind NewTSK,
NamedDecl *PrevDecl,
TemplateSpecializationKind PrevTSK,
SourceLocation PrevPtOfInstantiation,
bool &SuppressNew);
bool CheckDependentFunctionTemplateSpecialization(FunctionDecl *FD,
const TemplateArgumentListInfo &ExplicitTemplateArgs,
LookupResult &Previous);
bool CheckFunctionTemplateSpecialization(
FunctionDecl *FD, TemplateArgumentListInfo *ExplicitTemplateArgs,
LookupResult &Previous, bool QualifiedFriend = false);
bool CheckMemberSpecialization(NamedDecl *Member, LookupResult &Previous);
void CompleteMemberSpecialization(NamedDecl *Member, LookupResult &Previous);
DeclResult ActOnExplicitInstantiation(
Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc,
unsigned TagSpec, SourceLocation KWLoc, const CXXScopeSpec &SS,
TemplateTy Template, SourceLocation TemplateNameLoc,
SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc, const ParsedAttributesView &Attr);
DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc,
SourceLocation TemplateLoc,
unsigned TagSpec, SourceLocation KWLoc,
CXXScopeSpec &SS, IdentifierInfo *Name,
SourceLocation NameLoc,
const ParsedAttributesView &Attr);
DeclResult ActOnExplicitInstantiation(Scope *S,
SourceLocation ExternLoc,
SourceLocation TemplateLoc,
Declarator &D);
TemplateArgumentLoc
SubstDefaultTemplateArgumentIfAvailable(TemplateDecl *Template,
SourceLocation TemplateLoc,
SourceLocation RAngleLoc,
Decl *Param,
SmallVectorImpl<TemplateArgument> &Converted,
bool &HasDefaultArg);
/// Specifies the context in which a particular template
/// argument is being checked.
enum CheckTemplateArgumentKind {
/// The template argument was specified in the code or was
/// instantiated with some deduced template arguments.
CTAK_Specified,
/// The template argument was deduced via template argument
/// deduction.
CTAK_Deduced,
/// The template argument was deduced from an array bound
/// via template argument deduction.
CTAK_DeducedFromArrayBound
};
bool CheckTemplateArgument(NamedDecl *Param,
TemplateArgumentLoc &Arg,
NamedDecl *Template,
SourceLocation TemplateLoc,
SourceLocation RAngleLoc,
unsigned ArgumentPackIndex,
SmallVectorImpl<TemplateArgument> &Converted,
CheckTemplateArgumentKind CTAK = CTAK_Specified);
/// Check that the given template arguments can be provided to
/// the given template, converting the arguments along the way.
///
/// \param Template The template to which the template arguments are being
/// provided.
///
/// \param TemplateLoc The location of the template name in the source.
///
/// \param TemplateArgs The list of template arguments. If the template is
/// a template template parameter, this function may extend the set of
/// template arguments to also include substituted, defaulted template
/// arguments.
///
/// \param PartialTemplateArgs True if the list of template arguments is
/// intentionally partial, e.g., because we're checking just the initial
/// set of template arguments.
///
/// \param Converted Will receive the converted, canonicalized template
/// arguments.
///
/// \param UpdateArgsWithConversions If \c true, update \p TemplateArgs to
/// contain the converted forms of the template arguments as written.
/// Otherwise, \p TemplateArgs will not be modified.
///
/// \param ConstraintsNotSatisfied If provided, and an error occurred, will
/// receive true if the cause for the error is the associated constraints of
/// the template not being satisfied by the template arguments.
///
/// \returns true if an error occurred, false otherwise.
bool CheckTemplateArgumentList(TemplateDecl *Template,
SourceLocation TemplateLoc,
TemplateArgumentListInfo &TemplateArgs,
bool PartialTemplateArgs,
SmallVectorImpl<TemplateArgument> &Converted,
bool UpdateArgsWithConversions = true,
bool *ConstraintsNotSatisfied = nullptr);
bool CheckTemplateTypeArgument(TemplateTypeParmDecl *Param,
TemplateArgumentLoc &Arg,
SmallVectorImpl<TemplateArgument> &Converted);
bool CheckTemplateArgument(TypeSourceInfo *Arg);
ExprResult CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
QualType InstantiatedParamType, Expr *Arg,
TemplateArgument &Converted,
CheckTemplateArgumentKind CTAK = CTAK_Specified);
bool CheckTemplateTemplateArgument(TemplateTemplateParmDecl *Param,
TemplateParameterList *Params,
TemplateArgumentLoc &Arg);
ExprResult
BuildExpressionFromDeclTemplateArgument(const TemplateArgument &Arg,
QualType ParamType,
SourceLocation Loc);
ExprResult
BuildExpressionFromIntegralTemplateArgument(const TemplateArgument &Arg,
SourceLocation Loc);
/// Enumeration describing how template parameter lists are compared
/// for equality.
enum TemplateParameterListEqualKind {
/// We are matching the template parameter lists of two templates
/// that might be redeclarations.
///
/// \code
/// template<typename T> struct X;
/// template<typename T> struct X;
/// \endcode
TPL_TemplateMatch,
/// We are matching the template parameter lists of two template
/// template parameters as part of matching the template parameter lists
/// of two templates that might be redeclarations.
///
/// \code
/// template<template<int I> class TT> struct X;
/// template<template<int Value> class Other> struct X;
/// \endcode
TPL_TemplateTemplateParmMatch,
/// We are matching the template parameter lists of a template
/// template argument against the template parameter lists of a template
/// template parameter.
///
/// \code
/// template<template<int Value> class Metafun> struct X;
/// template<int Value> struct integer_c;
/// X<integer_c> xic;
/// \endcode
TPL_TemplateTemplateArgumentMatch
};
bool TemplateParameterListsAreEqual(TemplateParameterList *New,
TemplateParameterList *Old,
bool Complain,
TemplateParameterListEqualKind Kind,
SourceLocation TemplateArgLoc
= SourceLocation());
bool CheckTemplateDeclScope(Scope *S, TemplateParameterList *TemplateParams);
/// Called when the parser has parsed a C++ typename
/// specifier, e.g., "typename T::type".
///
/// \param S The scope in which this typename type occurs.
/// \param TypenameLoc the location of the 'typename' keyword
/// \param SS the nested-name-specifier following the typename (e.g., 'T::').
/// \param II the identifier we're retrieving (e.g., 'type' in the example).
/// \param IdLoc the location of the identifier.
TypeResult
ActOnTypenameType(Scope *S, SourceLocation TypenameLoc,
const CXXScopeSpec &SS, const IdentifierInfo &II,
SourceLocation IdLoc);
/// Called when the parser has parsed a C++ typename
/// specifier that ends in a template-id, e.g.,
/// "typename MetaFun::template apply<T1, T2>".
///
/// \param S The scope in which this typename type occurs.
/// \param TypenameLoc the location of the 'typename' keyword
/// \param SS the nested-name-specifier following the typename (e.g., 'T::').
/// \param TemplateLoc the location of the 'template' keyword, if any.
/// \param TemplateName The template name.
/// \param TemplateII The identifier used to name the template.
/// \param TemplateIILoc The location of the template name.
/// \param LAngleLoc The location of the opening angle bracket ('<').
/// \param TemplateArgs The template arguments.
/// \param RAngleLoc The location of the closing angle bracket ('>').
TypeResult
ActOnTypenameType(Scope *S, SourceLocation TypenameLoc,
const CXXScopeSpec &SS,
SourceLocation TemplateLoc,
TemplateTy TemplateName,
IdentifierInfo *TemplateII,
SourceLocation TemplateIILoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc);
QualType CheckTypenameType(ElaboratedTypeKeyword Keyword,
SourceLocation KeywordLoc,
NestedNameSpecifierLoc QualifierLoc,
const IdentifierInfo &II,
SourceLocation IILoc,
TypeSourceInfo **TSI,
bool DeducedTSTContext);
QualType CheckTypenameType(ElaboratedTypeKeyword Keyword,
SourceLocation KeywordLoc,
NestedNameSpecifierLoc QualifierLoc,
const IdentifierInfo &II,
SourceLocation IILoc,
bool DeducedTSTContext = true);
TypeSourceInfo *RebuildTypeInCurrentInstantiation(TypeSourceInfo *T,
SourceLocation Loc,
DeclarationName Name);
bool RebuildNestedNameSpecifierInCurrentInstantiation(CXXScopeSpec &SS);
ExprResult RebuildExprInCurrentInstantiation(Expr *E);
bool RebuildTemplateParamsInCurrentInstantiation(
TemplateParameterList *Params);
std::string
getTemplateArgumentBindingsText(const TemplateParameterList *Params,
const TemplateArgumentList &Args);
std::string
getTemplateArgumentBindingsText(const TemplateParameterList *Params,
const TemplateArgument *Args,
unsigned NumArgs);
//===--------------------------------------------------------------------===//
// C++ Concepts
//===--------------------------------------------------------------------===//
Decl *ActOnConceptDefinition(
Scope *S, MultiTemplateParamsArg TemplateParameterLists,
IdentifierInfo *Name, SourceLocation NameLoc, Expr *ConstraintExpr);
RequiresExprBodyDecl *
ActOnStartRequiresExpr(SourceLocation RequiresKWLoc,
ArrayRef<ParmVarDecl *> LocalParameters,
Scope *BodyScope);
void ActOnFinishRequiresExpr();
concepts::Requirement *ActOnSimpleRequirement(Expr *E);
concepts::Requirement *ActOnTypeRequirement(
SourceLocation TypenameKWLoc, CXXScopeSpec &SS, SourceLocation NameLoc,
IdentifierInfo *TypeName, TemplateIdAnnotation *TemplateId);
concepts::Requirement *ActOnCompoundRequirement(Expr *E,
SourceLocation NoexceptLoc);
concepts::Requirement *
ActOnCompoundRequirement(
Expr *E, SourceLocation NoexceptLoc, CXXScopeSpec &SS,
TemplateIdAnnotation *TypeConstraint, unsigned Depth);
concepts::Requirement *ActOnNestedRequirement(Expr *Constraint);
concepts::ExprRequirement *
BuildExprRequirement(
Expr *E, bool IsSatisfied, SourceLocation NoexceptLoc,
concepts::ExprRequirement::ReturnTypeRequirement ReturnTypeRequirement);
concepts::ExprRequirement *
BuildExprRequirement(
concepts::Requirement::SubstitutionDiagnostic *ExprSubstDiag,
bool IsSatisfied, SourceLocation NoexceptLoc,
concepts::ExprRequirement::ReturnTypeRequirement ReturnTypeRequirement);
concepts::TypeRequirement *BuildTypeRequirement(TypeSourceInfo *Type);
concepts::TypeRequirement *
BuildTypeRequirement(
concepts::Requirement::SubstitutionDiagnostic *SubstDiag);
concepts::NestedRequirement *BuildNestedRequirement(Expr *E);
concepts::NestedRequirement *
BuildNestedRequirement(
concepts::Requirement::SubstitutionDiagnostic *SubstDiag);
ExprResult ActOnRequiresExpr(SourceLocation RequiresKWLoc,
RequiresExprBodyDecl *Body,
ArrayRef<ParmVarDecl *> LocalParameters,
ArrayRef<concepts::Requirement *> Requirements,
SourceLocation ClosingBraceLoc);
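// Illustrative only: a requires-expression exercising the requirement kinds
// built by the functions above (assumes <concepts> for std::convertible_to).
//
// \code
//   template <typename T>
//   concept Summable = requires(T a, T b) {
//     typename T::value_type;              // type requirement
//     { a + b } -> std::convertible_to<T>; // compound requirement
//     requires sizeof(T) > 1;              // nested requirement
//   };
// \endcode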
//===--------------------------------------------------------------------===//
// C++ Variadic Templates (C++0x [temp.variadic])
//===--------------------------------------------------------------------===//
/// Determine whether an unexpanded parameter pack might be permitted in this
/// location. Useful for error recovery.
bool isUnexpandedParameterPackPermitted();
/// The context in which an unexpanded parameter pack is
/// being diagnosed.
///
/// Note that the values of this enumeration line up with the first
/// argument to the \c err_unexpanded_parameter_pack diagnostic.
enum UnexpandedParameterPackContext {
/// An arbitrary expression.
UPPC_Expression = 0,
/// The base type of a class type.
UPPC_BaseType,
/// The type of an arbitrary declaration.
UPPC_DeclarationType,
/// The type of a data member.
UPPC_DataMemberType,
/// The size of a bit-field.
UPPC_BitFieldWidth,
/// The expression in a static assertion.
UPPC_StaticAssertExpression,
/// The fixed underlying type of an enumeration.
UPPC_FixedUnderlyingType,
/// The enumerator value.
UPPC_EnumeratorValue,
/// A using declaration.
UPPC_UsingDeclaration,
/// A friend declaration.
UPPC_FriendDeclaration,
/// A declaration qualifier.
UPPC_DeclarationQualifier,
/// An initializer.
UPPC_Initializer,
/// A default argument.
UPPC_DefaultArgument,
/// The type of a non-type template parameter.
UPPC_NonTypeTemplateParameterType,
/// The type of an exception.
UPPC_ExceptionType,
/// Partial specialization.
UPPC_PartialSpecialization,
/// Microsoft __if_exists.
UPPC_IfExists,
/// Microsoft __if_not_exists.
UPPC_IfNotExists,
/// Lambda expression.
UPPC_Lambda,
/// Block expression.
UPPC_Block,
/// A type constraint.
UPPC_TypeConstraint,
/// A requirement in a requires-expression.
UPPC_Requirement,
/// A requires-clause.
UPPC_RequiresClause,
};
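// Illustrative only: the kind of error these contexts classify, here in an
// arbitrary expression (UPPC_Expression); 'use' is a hypothetical callee.
//
// \code
//   template <typename... Ts> void fwd(Ts... ts) {
//     use(ts);    // error: unexpanded parameter pack 'ts'
//     use(ts...); // OK: the pack is expanded
//   }
// \endcode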
/// Diagnose unexpanded parameter packs.
///
/// \param Loc The location at which we should emit the diagnostic.
///
/// \param UPPC The context in which we are diagnosing unexpanded
/// parameter packs.
///
/// \param Unexpanded the set of unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPacks(SourceLocation Loc,
UnexpandedParameterPackContext UPPC,
ArrayRef<UnexpandedParameterPack> Unexpanded);
/// If the given type contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param Loc The source location where a diagnostic should be emitted.
///
/// \param T The type that is being checked for unexpanded parameter
/// packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TypeSourceInfo *T,
UnexpandedParameterPackContext UPPC);
/// If the given expression contains an unexpanded parameter
/// pack, diagnose the error.
///
/// \param E The expression that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(Expr *E,
UnexpandedParameterPackContext UPPC = UPPC_Expression);
/// If the given requires-expression contains an unexpanded reference to one
/// of its own parameter packs, diagnose the error.
///
/// \param RE The requires-expression that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPackInRequiresExpr(RequiresExpr *RE);
/// If the given nested-name-specifier contains an unexpanded
/// parameter pack, diagnose the error.
///
/// \param SS The nested-name-specifier that is being checked for
/// unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(const CXXScopeSpec &SS,
UnexpandedParameterPackContext UPPC);
/// If the given name contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param NameInfo The name (with source location information) that
/// is being checked for unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(const DeclarationNameInfo &NameInfo,
UnexpandedParameterPackContext UPPC);
/// If the given template name contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param Loc The location of the template name.
///
/// \param Template The template name that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(SourceLocation Loc,
TemplateName Template,
UnexpandedParameterPackContext UPPC);
/// If the given template argument contains an unexpanded parameter
/// pack, diagnose the error.
///
/// \param Arg The template argument that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(TemplateArgumentLoc Arg,
UnexpandedParameterPackContext UPPC);
/// Collect the set of unexpanded parameter packs within the given
/// template argument.
///
/// \param Arg The template argument that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(TemplateArgument Arg,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// template argument.
///
/// \param Arg The template argument that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(TemplateArgumentLoc Arg,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// type.
///
/// \param T The type that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(QualType T,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// type.
///
/// \param TL The type that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(TypeLoc TL,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// nested-name-specifier.
///
/// \param NNS The nested-name-specifier that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(NestedNameSpecifierLoc NNS,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// name.
///
/// \param NameInfo The name that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(const DeclarationNameInfo &NameInfo,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Invoked when parsing a template argument followed by an
/// ellipsis, which creates a pack expansion.
///
/// \param Arg The template argument preceding the ellipsis, which
/// may already be invalid.
///
/// \param EllipsisLoc The location of the ellipsis.
ParsedTemplateArgument ActOnPackExpansion(const ParsedTemplateArgument &Arg,
SourceLocation EllipsisLoc);
/// Invoked when parsing a type followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Type The type preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
TypeResult ActOnPackExpansion(ParsedType Type, SourceLocation EllipsisLoc);
/// Construct a pack expansion type from the pattern of the pack
/// expansion.
TypeSourceInfo *CheckPackExpansion(TypeSourceInfo *Pattern,
SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions);
/// Construct a pack expansion type from the pattern of the pack
/// expansion.
QualType CheckPackExpansion(QualType Pattern,
SourceRange PatternRange,
SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions);
/// Invoked when parsing an expression followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Pattern The expression preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
ExprResult ActOnPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc);
/// Invoked when parsing an expression followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Pattern The expression preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
ExprResult CheckPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions);
/// Determine whether we could expand a pack expansion with the
/// given set of parameter packs into separate arguments by repeatedly
/// transforming the pattern.
///
/// \param EllipsisLoc The location of the ellipsis that identifies the
/// pack expansion.
///
/// \param PatternRange The source range that covers the entire pattern of
/// the pack expansion.
///
/// \param Unexpanded The set of unexpanded parameter packs within the
/// pattern.
///
/// \param ShouldExpand Will be set to \c true if the transformer should
/// expand the corresponding pack expansions into separate arguments. When
/// set, \c NumExpansions must also be set.
///
/// \param RetainExpansion Whether the caller should add an unexpanded
/// pack expansion after all of the expanded arguments. This is used
/// when extending explicitly-specified template argument packs per
/// C++0x [temp.arg.explicit]p9.
///
/// \param NumExpansions The number of separate arguments that will be in
/// the expanded form of the corresponding pack expansion. This is both an
/// input and an output parameter, which can be set by the caller if the
/// number of expansions is known a priori (e.g., due to a prior substitution)
/// and will be set by the callee when the number of expansions is known.
/// The callee must set this value when \c ShouldExpand is \c true; it may
/// set this value in other cases.
///
/// \returns true if an error occurred (e.g., because the parameter packs
/// are to be instantiated with arguments of different lengths), false
/// otherwise. If false, \c ShouldExpand (and possibly \c NumExpansions)
/// must be set.
bool CheckParameterPacksForExpansion(SourceLocation EllipsisLoc,
SourceRange PatternRange,
ArrayRef<UnexpandedParameterPack> Unexpanded,
const MultiLevelTemplateArgumentList &TemplateArgs,
bool &ShouldExpand,
bool &RetainExpansion,
Optional<unsigned> &NumExpansions);
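// Illustrative only: a pattern expanding two packs together; instantiating
// it with packs of different lengths is the error described above ('Tuple'
// and 'Pair' are hypothetical class templates).
//
// \code
//   template <typename... As> struct Zip {
//     template <typename... Bs>
//     using Pairs = Tuple<Pair<As, Bs>...>; // needs sizeof...(As) ==
//                                           //       sizeof...(Bs)
//   };
// \endcode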
/// Determine the number of arguments in the given pack expansion
/// type.
///
/// This routine assumes that the number of arguments in the expansion is
/// consistent across all of the unexpanded parameter packs in its pattern.
///
/// Returns an empty Optional if the type can't be expanded.
Optional<unsigned> getNumArgumentsInExpansion(QualType T,
const MultiLevelTemplateArgumentList &TemplateArgs);
/// Determine whether the given declarator contains any unexpanded
/// parameter packs.
///
/// This routine is used by the parser to disambiguate function declarators
/// with an ellipsis prior to the ')', e.g.,
///
/// \code
/// void f(T...);
/// \endcode
///
/// To determine whether we have an (unnamed) function parameter pack or
/// a variadic function.
///
/// \returns true if the declarator contains any unexpanded parameter packs,
/// false otherwise.
bool containsUnexpandedParameterPacks(Declarator &D);
/// Returns the pattern of the pack expansion for a template argument.
///
/// \param OrigLoc The template argument to expand.
///
/// \param Ellipsis Will be set to the location of the ellipsis.
///
/// \param NumExpansions Will be set to the number of expansions that will
/// be generated from this pack expansion, if known a priori.
TemplateArgumentLoc getTemplateArgumentPackExpansionPattern(
TemplateArgumentLoc OrigLoc,
SourceLocation &Ellipsis,
Optional<unsigned> &NumExpansions) const;
/// Given a template argument that contains an unexpanded parameter pack, but
/// which has already been substituted, attempt to determine the number of
/// elements that will be produced once this argument is fully-expanded.
///
/// This is intended for use when transforming 'sizeof...(Arg)' in order to
/// avoid actually expanding the pack where possible.
Optional<unsigned> getFullyPackExpandedSize(TemplateArgument Arg);
//===--------------------------------------------------------------------===//
// C++ Template Argument Deduction (C++ [temp.deduct])
//===--------------------------------------------------------------------===//
/// Adjust the type \p ArgFunctionType to match the calling convention,
/// noreturn, and optionally the exception specification of \p FunctionType.
/// Deduction often wants to ignore these properties when matching function
/// types.
QualType adjustCCAndNoReturn(QualType ArgFunctionType, QualType FunctionType,
bool AdjustExceptionSpec = false);
/// Describes the result of template argument deduction.
///
/// The TemplateDeductionResult enumeration describes the result of
/// template argument deduction, as returned from
/// DeduceTemplateArguments(). The separate TemplateDeductionInfo
/// structure provides additional information about the results of
/// template argument deduction, e.g., the deduced template argument
/// list (if successful) or the specific template parameters or
/// deduced arguments that were involved in the failure.
enum TemplateDeductionResult {
/// Template argument deduction was successful.
TDK_Success = 0,
/// The declaration was invalid; do nothing.
TDK_Invalid,
/// Template argument deduction exceeded the maximum template
/// instantiation depth (which has already been diagnosed).
TDK_InstantiationDepth,
/// Template argument deduction did not deduce a value
/// for every template parameter.
TDK_Incomplete,
/// Template argument deduction did not deduce a value for every
/// expansion of an expanded template parameter pack.
TDK_IncompletePack,
/// Template argument deduction produced inconsistent
/// deduced values for the given template parameter.
TDK_Inconsistent,
/// Template argument deduction failed due to inconsistent
/// cv-qualifiers on a template parameter type that would
/// otherwise be deduced, e.g., we tried to deduce T in "const T"
/// but were given a non-const "X".
TDK_Underqualified,
/// Substitution of the deduced template argument values
/// resulted in an error.
TDK_SubstitutionFailure,
/// After substituting deduced template arguments, a dependent
/// parameter type did not match the corresponding argument.
TDK_DeducedMismatch,
/// After substituting deduced template arguments, an element of
/// a dependent parameter type did not match the corresponding element
/// of the corresponding argument (when deducing from an initializer list).
TDK_DeducedMismatchNested,
/// A non-dependent component of the parameter did not match the
/// corresponding component of the argument.
TDK_NonDeducedMismatch,
/// When performing template argument deduction for a function
/// template, there were too many call arguments.
TDK_TooManyArguments,
/// When performing template argument deduction for a function
/// template, there were too few call arguments.
TDK_TooFewArguments,
/// The explicitly-specified template arguments were not valid
/// template arguments for the given template.
TDK_InvalidExplicitArguments,
/// Checking non-dependent argument conversions failed.
TDK_NonDependentConversionFailure,
/// The deduced arguments did not satisfy the constraints associated
/// with the template.
TDK_ConstraintsNotSatisfied,
/// Deduction failed; that's all we know.
TDK_MiscellaneousDeductionFailure,
/// CUDA Target attributes do not match.
TDK_CUDATargetMismatch
};
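// Illustrative only: a call for which deduction cannot deduce every template
// parameter, yielding TDK_Incomplete.
//
// \code
//   template <typename T, typename U> void f(T);
//   void g() { f(42); } // T = int is deduced, but U is not: TDK_Incomplete
// \endcode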
TemplateDeductionResult
DeduceTemplateArguments(ClassTemplatePartialSpecializationDecl *Partial,
const TemplateArgumentList &TemplateArgs,
sema::TemplateDeductionInfo &Info);
TemplateDeductionResult
DeduceTemplateArguments(VarTemplatePartialSpecializationDecl *Partial,
const TemplateArgumentList &TemplateArgs,
sema::TemplateDeductionInfo &Info);
TemplateDeductionResult SubstituteExplicitTemplateArguments(
FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo &ExplicitTemplateArgs,
SmallVectorImpl<DeducedTemplateArgument> &Deduced,
SmallVectorImpl<QualType> &ParamTypes, QualType *FunctionType,
sema::TemplateDeductionInfo &Info);
/// A function argument from which we performed template argument
/// deduction for a call.
struct OriginalCallArg {
OriginalCallArg(QualType OriginalParamType, bool DecomposedParam,
unsigned ArgIdx, QualType OriginalArgType)
: OriginalParamType(OriginalParamType),
DecomposedParam(DecomposedParam), ArgIdx(ArgIdx),
OriginalArgType(OriginalArgType) {}
QualType OriginalParamType;
bool DecomposedParam;
unsigned ArgIdx;
QualType OriginalArgType;
};
TemplateDeductionResult FinishTemplateArgumentDeduction(
FunctionTemplateDecl *FunctionTemplate,
SmallVectorImpl<DeducedTemplateArgument> &Deduced,
unsigned NumExplicitlySpecified, FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
SmallVectorImpl<OriginalCallArg> const *OriginalCallArgs = nullptr,
bool PartialOverloading = false,
llvm::function_ref<bool()> CheckNonDependent = []{ return false; });
TemplateDeductionResult DeduceTemplateArguments(
FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args,
FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info,
bool PartialOverloading,
llvm::function_ref<bool(ArrayRef<QualType>)> CheckNonDependent);
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs,
QualType ArgFunctionType,
FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
bool IsAddressOfFunction = false);
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
QualType ToType,
CXXConversionDecl *&Specialization,
sema::TemplateDeductionInfo &Info);
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs,
FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
bool IsAddressOfFunction = false);
/// Substitute Replacement for \p auto in \p TypeWithAuto
QualType SubstAutoType(QualType TypeWithAuto, QualType Replacement);
/// Substitute Replacement for auto in TypeWithAuto
TypeSourceInfo* SubstAutoTypeSourceInfo(TypeSourceInfo *TypeWithAuto,
QualType Replacement);
/// Substitute auto in TypeWithAuto for a dependent auto type.
QualType SubstAutoTypeDependent(QualType TypeWithAuto);
/// Substitute auto in TypeWithAuto for a dependent auto type.
TypeSourceInfo *
SubstAutoTypeSourceInfoDependent(TypeSourceInfo *TypeWithAuto);
/// Completely replace the \c auto in \p TypeWithAuto by
/// \p Replacement. This does not retain any \c auto type sugar.
QualType ReplaceAutoType(QualType TypeWithAuto, QualType Replacement);
TypeSourceInfo *ReplaceAutoTypeSourceInfo(TypeSourceInfo *TypeWithAuto,
QualType Replacement);
/// Result type of DeduceAutoType.
enum DeduceAutoResult {
DAR_Succeeded,
DAR_Failed,
DAR_FailedAlreadyDiagnosed
};
DeduceAutoResult
DeduceAutoType(TypeSourceInfo *AutoType, Expr *&Initializer, QualType &Result,
Optional<unsigned> DependentDeductionDepth = None,
bool IgnoreConstraints = false);
DeduceAutoResult
DeduceAutoType(TypeLoc AutoTypeLoc, Expr *&Initializer, QualType &Result,
Optional<unsigned> DependentDeductionDepth = None,
bool IgnoreConstraints = false);
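// Illustrative caller sketch (not from the original header); 'SemaRef',
// 'TSI', and 'Init' are hypothetical.
//
// \code
//   QualType DeducedType;
//   if (SemaRef.DeduceAutoType(TSI, Init, DeducedType) != Sema::DAR_Succeeded)
//     return; // failed (DAR_Failed) or already diagnosed
// \endcode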
void DiagnoseAutoDeductionFailure(VarDecl *VDecl, Expr *Init);
bool DeduceReturnType(FunctionDecl *FD, SourceLocation Loc,
bool Diagnose = true);
/// Declare implicit deduction guides for a class template if we've
/// not already done so.
void DeclareImplicitDeductionGuides(TemplateDecl *Template,
SourceLocation Loc);
QualType DeduceTemplateSpecializationFromInitializer(
TypeSourceInfo *TInfo, const InitializedEntity &Entity,
const InitializationKind &Kind, MultiExprArg Init);
QualType deduceVarTypeFromInitializer(VarDecl *VDecl, DeclarationName Name,
QualType Type, TypeSourceInfo *TSI,
SourceRange Range, bool DirectInit,
Expr *Init);
TypeLoc getReturnTypeLoc(FunctionDecl *FD) const;
bool DeduceFunctionTypeFromReturnExpr(FunctionDecl *FD,
SourceLocation ReturnLoc,
Expr *&RetExpr, AutoType *AT);
FunctionTemplateDecl *getMoreSpecializedTemplate(
FunctionTemplateDecl *FT1, FunctionTemplateDecl *FT2, SourceLocation Loc,
TemplatePartialOrderingContext TPOC, unsigned NumCallArguments1,
unsigned NumCallArguments2, bool Reversed = false);
UnresolvedSetIterator
getMostSpecialized(UnresolvedSetIterator SBegin, UnresolvedSetIterator SEnd,
TemplateSpecCandidateSet &FailedCandidates,
SourceLocation Loc,
const PartialDiagnostic &NoneDiag,
const PartialDiagnostic &AmbigDiag,
const PartialDiagnostic &CandidateDiag,
bool Complain = true, QualType TargetType = QualType());
ClassTemplatePartialSpecializationDecl *
getMoreSpecializedPartialSpecialization(
ClassTemplatePartialSpecializationDecl *PS1,
ClassTemplatePartialSpecializationDecl *PS2,
SourceLocation Loc);
bool isMoreSpecializedThanPrimary(ClassTemplatePartialSpecializationDecl *T,
sema::TemplateDeductionInfo &Info);
VarTemplatePartialSpecializationDecl *getMoreSpecializedPartialSpecialization(
VarTemplatePartialSpecializationDecl *PS1,
VarTemplatePartialSpecializationDecl *PS2, SourceLocation Loc);
bool isMoreSpecializedThanPrimary(VarTemplatePartialSpecializationDecl *T,
sema::TemplateDeductionInfo &Info);
bool isTemplateTemplateParameterAtLeastAsSpecializedAs(
TemplateParameterList *PParam, TemplateDecl *AArg, SourceLocation Loc);
void MarkUsedTemplateParameters(const Expr *E, bool OnlyDeduced,
unsigned Depth, llvm::SmallBitVector &Used);
void MarkUsedTemplateParameters(const TemplateArgumentList &TemplateArgs,
bool OnlyDeduced,
unsigned Depth,
llvm::SmallBitVector &Used);
void MarkDeducedTemplateParameters(
const FunctionTemplateDecl *FunctionTemplate,
llvm::SmallBitVector &Deduced) {
return MarkDeducedTemplateParameters(Context, FunctionTemplate, Deduced);
}
static void MarkDeducedTemplateParameters(ASTContext &Ctx,
const FunctionTemplateDecl *FunctionTemplate,
llvm::SmallBitVector &Deduced);
//===--------------------------------------------------------------------===//
// C++ Template Instantiation
//
MultiLevelTemplateArgumentList
getTemplateInstantiationArgs(NamedDecl *D,
const TemplateArgumentList *Innermost = nullptr,
bool RelativeToPrimary = false,
const FunctionDecl *Pattern = nullptr);
/// A context in which code is being synthesized (where a source location
/// alone is not sufficient to identify the context). This covers template
/// instantiation and various forms of implicitly-generated functions.
struct CodeSynthesisContext {
/// The kind of template instantiation we are performing
enum SynthesisKind {
/// We are instantiating a template declaration. The entity is
/// the declaration we're instantiating (e.g., a CXXRecordDecl).
TemplateInstantiation,
/// We are instantiating a default argument for a template
/// parameter. The Entity is the template parameter whose argument is
/// being instantiated, the Template is the template, and the
/// TemplateArgs/NumTemplateArguments provide the template arguments as
/// specified.
DefaultTemplateArgumentInstantiation,
/// We are instantiating a default argument for a function.
/// The Entity is the ParmVarDecl, and TemplateArgs/NumTemplateArgs
/// provides the template arguments as specified.
DefaultFunctionArgumentInstantiation,
/// We are substituting explicit template arguments provided for
/// a function template. The entity is a FunctionTemplateDecl.
ExplicitTemplateArgumentSubstitution,
/// We are substituting template argument determined as part of
/// template argument deduction for either a class template
/// partial specialization or a function template. The
/// Entity is either a {Class|Var}TemplatePartialSpecializationDecl or
/// a TemplateDecl.
DeducedTemplateArgumentSubstitution,
/// We are substituting prior template arguments into a new
/// template parameter. The template parameter itself is either a
/// NonTypeTemplateParmDecl or a TemplateTemplateParmDecl.
PriorTemplateArgumentSubstitution,
/// We are checking the validity of a default template argument that
/// has been used when naming a template-id.
DefaultTemplateArgumentChecking,
/// We are computing the exception specification for a defaulted special
/// member function.
ExceptionSpecEvaluation,
/// We are instantiating the exception specification for a function
/// template which was deferred until it was needed.
ExceptionSpecInstantiation,
/// We are instantiating a requirement of a requires expression.
RequirementInstantiation,
/// We are checking the satisfaction of a nested requirement of a requires
/// expression.
NestedRequirementConstraintsCheck,
/// We are declaring an implicit special member function.
DeclaringSpecialMember,
/// We are declaring an implicit 'operator==' for a defaulted
/// 'operator<=>'.
DeclaringImplicitEqualityComparison,
/// We are defining a synthesized function (such as a defaulted special
/// member).
DefiningSynthesizedFunction,
    /// We are checking the constraints associated with a constrained entity or
    /// the constraint expression of a concept. This includes the checks that
    /// atomic constraints have the type 'bool' and that they can be constant
    /// evaluated.
    ConstraintsCheck,
    /// We are substituting template arguments into a constraint expression.
    ConstraintSubstitution,
    /// We are normalizing a constraint expression.
    ConstraintNormalization,
    /// We are substituting into the parameter mapping of an atomic constraint
    /// during normalization.
    ParameterMappingSubstitution,
/// We are rewriting a comparison operator in terms of an operator<=>.
RewritingOperatorAsSpaceship,
/// We are initializing a structured binding.
InitializingStructuredBinding,
/// We are marking a class as __dllexport.
MarkingClassDllexported,
/// Added for template instantiation observation.
/// Memoization means we are _not_ instantiating a template because
/// it is already instantiated (but we entered a context where we
/// would have had to if it was not already instantiated).
Memoization
} Kind;
/// Was the enclosing context a non-instantiation SFINAE context?
bool SavedInNonInstantiationSFINAEContext;
/// The point of instantiation or synthesis within the source code.
SourceLocation PointOfInstantiation;
/// The entity that is being synthesized.
Decl *Entity;
/// The template (or partial specialization) in which we are
/// performing the instantiation, for substitutions of prior template
/// arguments.
NamedDecl *Template;
/// The list of template arguments we are substituting, if they
/// are not part of the entity.
const TemplateArgument *TemplateArgs;
// FIXME: Wrap this union around more members, or perhaps store the
// kind-specific members in the RAII object owning the context.
union {
/// The number of template arguments in TemplateArgs.
unsigned NumTemplateArgs;
/// The special member being declared or defined.
CXXSpecialMember SpecialMember;
};
ArrayRef<TemplateArgument> template_arguments() const {
assert(Kind != DeclaringSpecialMember);
return {TemplateArgs, NumTemplateArgs};
}
/// The template deduction info object associated with the
/// substitution or checking of explicit or deduced template arguments.
sema::TemplateDeductionInfo *DeductionInfo;
/// The source range that covers the construct that caused
/// the instantiation, e.g., the template-id that causes a class
/// template instantiation.
SourceRange InstantiationRange;
CodeSynthesisContext()
: Kind(TemplateInstantiation),
SavedInNonInstantiationSFINAEContext(false), Entity(nullptr),
Template(nullptr), TemplateArgs(nullptr), NumTemplateArgs(0),
DeductionInfo(nullptr) {}
/// Determines whether this template is an actual instantiation
/// that should be counted toward the maximum instantiation depth.
bool isInstantiationRecord() const;
};
/// List of active code synthesis contexts.
///
/// This vector is treated as a stack. As synthesis of one entity requires
/// synthesis of another, additional contexts are pushed onto the stack.
SmallVector<CodeSynthesisContext, 16> CodeSynthesisContexts;
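// Illustrative sketch (not from the original header): a diagnostics printer
// might walk the active contexts innermost-first, skipping memoization
// entries; noteContext is a hypothetical helper.
//
//   for (const Sema::CodeSynthesisContext &Ctx :
//        llvm::reverse(S.CodeSynthesisContexts))
//     if (Ctx.Kind != Sema::CodeSynthesisContext::Memoization)
//       noteContext(Ctx.PointOfInstantiation, Ctx.Entity);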
/// Specializations whose definitions are currently being instantiated.
llvm::DenseSet<std::pair<Decl *, unsigned>> InstantiatingSpecializations;
/// Non-dependent types used in templates that have already been instantiated
/// by some template instantiation.
llvm::DenseSet<QualType> InstantiatedNonDependentTypes;
/// Extra modules inspected when performing a lookup during a template
/// instantiation. Computed lazily.
SmallVector<Module*, 16> CodeSynthesisContextLookupModules;
/// Cache of additional modules that should be used for name lookup
/// within the current template instantiation. Computed lazily; use
/// getLookupModules() to get a complete set.
llvm::DenseSet<Module*> LookupModulesCache;
/// Get the set of additional modules that should be checked during
/// name lookup. A module and its imports become visible when instantiating a
/// template defined within it.
llvm::DenseSet<Module*> &getLookupModules();
/// Map from the most recent declaration of a namespace to the most
/// recent visible declaration of that namespace.
llvm::DenseMap<NamedDecl*, NamedDecl*> VisibleNamespaceCache;
/// Whether we are in a SFINAE context that is not associated with
/// template instantiation.
///
/// This is used when setting up a SFINAE trap (see \c SFINAETrap) outside
/// of a template instantiation or template argument deduction.
bool InNonInstantiationSFINAEContext;
/// The number of \p CodeSynthesisContexts that are not template
/// instantiations and, therefore, should not be counted as part of the
/// instantiation depth.
///
/// When the instantiation depth reaches the user-configurable limit
/// \p LangOptions::InstantiationDepth we will abort instantiation.
// FIXME: Should we have a similar limit for other forms of synthesis?
unsigned NonInstantiationEntries;
/// The depth of the context stack at the point when the most recent
/// error or warning was produced.
///
/// This value is used to suppress printing of redundant context stacks
/// when there are multiple errors or warnings in the same instantiation.
// FIXME: Does this belong in Sema? It's tough to implement it anywhere else.
unsigned LastEmittedCodeSynthesisContextDepth = 0;
/// The template instantiation callbacks to trace or track
/// instantiations (objects can be chained).
///
/// These callbacks are used to print, trace, or track template
/// instantiations as they are being constructed.
std::vector<std::unique_ptr<TemplateInstantiationCallback>>
TemplateInstCallbacks;
/// The current index into pack expansion arguments that will be
/// used for substitution of parameter packs.
///
/// The pack expansion index will be -1 to indicate that parameter packs
/// should be instantiated as themselves. Otherwise, the index specifies
/// which argument within the parameter pack will be used for substitution.
int ArgumentPackSubstitutionIndex;
/// RAII object used to change the argument pack substitution index
/// within a \c Sema object.
///
/// See \c ArgumentPackSubstitutionIndex for more information.
class ArgumentPackSubstitutionIndexRAII {
Sema &Self;
int OldSubstitutionIndex;
public:
ArgumentPackSubstitutionIndexRAII(Sema &Self, int NewSubstitutionIndex)
: Self(Self), OldSubstitutionIndex(Self.ArgumentPackSubstitutionIndex) {
Self.ArgumentPackSubstitutionIndex = NewSubstitutionIndex;
}
~ArgumentPackSubstitutionIndexRAII() {
Self.ArgumentPackSubstitutionIndex = OldSubstitutionIndex;
}
};
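// Example usage (illustrative sketch; assumes a Sema &S and the index I of
// the pack element currently being expanded):
//
//   {
//     Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(S, I);
//     // ... substitute using the I-th argument of each expanded pack ...
//   } // the previous substitution index is restored here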
friend class ArgumentPackSubstitutionRAII;
/// For each declaration that involved template argument deduction, the
/// set of diagnostics that were suppressed during that template argument
/// deduction.
///
/// FIXME: Serialize this structure to the AST file.
typedef llvm::DenseMap<Decl *, SmallVector<PartialDiagnosticAt, 1> >
SuppressedDiagnosticsMap;
SuppressedDiagnosticsMap SuppressedDiagnostics;
/// A stack object to be created when performing template
/// instantiation.
///
/// Construction of an object of type \c InstantiatingTemplate
/// pushes the current instantiation onto the stack of active
/// instantiations. If the size of this stack exceeds the maximum
/// number of recursive template instantiations, construction
/// produces an error and \c isInvalid() returns true.
///
/// Destruction of this object will pop the named instantiation off
/// the stack.
struct InstantiatingTemplate {
/// Note that we are instantiating a class template,
/// function template, variable template, alias template,
/// or a member thereof.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
Decl *Entity,
SourceRange InstantiationRange = SourceRange());
struct ExceptionSpecification {};
/// Note that we are instantiating an exception specification
/// of a function template.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
FunctionDecl *Entity, ExceptionSpecification,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating a default argument in a
/// template-id.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
TemplateParameter Param, TemplateDecl *Template,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange = SourceRange());
/// Note that we are substituting either explicitly-specified or
/// deduced template arguments during function template argument deduction.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
FunctionTemplateDecl *FunctionTemplate,
ArrayRef<TemplateArgument> TemplateArgs,
CodeSynthesisContext::SynthesisKind Kind,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating as part of template
/// argument deduction for a class template declaration.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
TemplateDecl *Template,
ArrayRef<TemplateArgument> TemplateArgs,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating as part of template
/// argument deduction for a class template partial
/// specialization.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ClassTemplatePartialSpecializationDecl *PartialSpec,
ArrayRef<TemplateArgument> TemplateArgs,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating as part of template
/// argument deduction for a variable template partial
/// specialization.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
VarTemplatePartialSpecializationDecl *PartialSpec,
ArrayRef<TemplateArgument> TemplateArgs,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating a default argument for a function
/// parameter.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ParmVarDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange = SourceRange());
/// Note that we are substituting prior template arguments into a
/// non-type parameter.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
NamedDecl *Template,
NonTypeTemplateParmDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
/// Note that we are substituting prior template arguments into a
/// template template parameter.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
NamedDecl *Template,
TemplateTemplateParmDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
/// Note that we are checking the default template argument
/// against the template parameter for a given template-id.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
TemplateDecl *Template,
NamedDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
struct ConstraintsCheck {};
/// \brief Note that we are checking the constraints associated with some
/// constrained entity (a concept declaration or a template with associated
/// constraints).
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ConstraintsCheck, NamedDecl *Template,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
struct ConstraintSubstitution {};
/// \brief Note that we are checking a constraint expression associated
/// with a template declaration or as part of the satisfaction check of a
/// concept.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ConstraintSubstitution, NamedDecl *Template,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange);
struct ConstraintNormalization {};
/// \brief Note that we are normalizing a constraint expression.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ConstraintNormalization, NamedDecl *Template,
SourceRange InstantiationRange);
struct ParameterMappingSubstitution {};
/// \brief Note that we are substituting into the parameter mapping of an
/// atomic constraint during constraint normalization.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ParameterMappingSubstitution, NamedDecl *Template,
SourceRange InstantiationRange);
/// \brief Note that we are substituting template arguments into a part of
/// a requirement of a requires expression.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
concepts::Requirement *Req,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// \brief Note that we are checking the satisfaction of the constraint
/// expression inside of a nested requirement.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
concepts::NestedRequirement *Req, ConstraintsCheck,
SourceRange InstantiationRange = SourceRange());
/// Note that we have finished instantiating this template.
void Clear();
~InstantiatingTemplate() { Clear(); }
/// Determines whether we have exceeded the maximum
/// recursive template instantiations.
bool isInvalid() const { return Invalid; }
/// Determine whether we are already instantiating this
/// specialization in some surrounding active instantiation.
bool isAlreadyInstantiating() const { return AlreadyInstantiating; }
private:
Sema &SemaRef;
bool Invalid;
bool AlreadyInstantiating;
bool CheckInstantiationDepth(SourceLocation PointOfInstantiation,
SourceRange InstantiationRange);
InstantiatingTemplate(
Sema &SemaRef, CodeSynthesisContext::SynthesisKind Kind,
SourceLocation PointOfInstantiation, SourceRange InstantiationRange,
Decl *Entity, NamedDecl *Template = nullptr,
ArrayRef<TemplateArgument> TemplateArgs = None,
sema::TemplateDeductionInfo *DeductionInfo = nullptr);
InstantiatingTemplate(const InstantiatingTemplate&) = delete;
InstantiatingTemplate&
operator=(const InstantiatingTemplate&) = delete;
};
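// Example usage (illustrative sketch; assumes a Sema &S, a point of
// instantiation POI, and a Decl *Spec being instantiated):
//
//   Sema::InstantiatingTemplate Inst(S, POI, Spec);
//   if (Inst.isInvalid() || Inst.isAlreadyInstantiating())
//     return; // depth limit exceeded, or this entity is already in progress
//   // ... perform the instantiation; the context is popped on scope exit ...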
void pushCodeSynthesisContext(CodeSynthesisContext Ctx);
void popCodeSynthesisContext();
/// Determine whether we are currently performing template instantiation.
bool inTemplateInstantiation() const {
return CodeSynthesisContexts.size() > NonInstantiationEntries;
}
void PrintContextStack() {
if (!CodeSynthesisContexts.empty() &&
CodeSynthesisContexts.size() != LastEmittedCodeSynthesisContextDepth) {
PrintInstantiationStack();
LastEmittedCodeSynthesisContextDepth = CodeSynthesisContexts.size();
}
if (PragmaAttributeCurrentTargetDecl)
PrintPragmaAttributeInstantiationPoint();
}
void PrintInstantiationStack();
void PrintPragmaAttributeInstantiationPoint();
/// Determines whether we are currently in a context where
/// template argument substitution failures are not considered
/// errors.
///
/// \returns An empty \c Optional if we're not in a SFINAE context.
/// Otherwise, contains a pointer that, if non-NULL, contains the nearest
/// template-deduction context object, which can be used to capture
/// diagnostics that will be suppressed.
Optional<sema::TemplateDeductionInfo *> isSFINAEContext() const;
/// Determines whether we are currently in a context that
/// is not evaluated as per C++ [expr] p5.
bool isUnevaluatedContext() const {
assert(!ExprEvalContexts.empty() &&
"Must be in an expression evaluation context");
return ExprEvalContexts.back().isUnevaluated();
}
bool isImmediateFunctionContext() const {
assert(!ExprEvalContexts.empty() &&
"Must be in an expression evaluation context");
return ExprEvalContexts.back().isImmediateFunctionContext();
}
/// RAII class used to determine whether SFINAE has
/// trapped any errors that occur during template argument
/// deduction.
class SFINAETrap {
Sema &SemaRef;
unsigned PrevSFINAEErrors;
bool PrevInNonInstantiationSFINAEContext;
bool PrevAccessCheckingSFINAE;
bool PrevLastDiagnosticIgnored;
public:
explicit SFINAETrap(Sema &SemaRef, bool AccessCheckingSFINAE = false)
: SemaRef(SemaRef), PrevSFINAEErrors(SemaRef.NumSFINAEErrors),
PrevInNonInstantiationSFINAEContext(
SemaRef.InNonInstantiationSFINAEContext),
PrevAccessCheckingSFINAE(SemaRef.AccessCheckingSFINAE),
PrevLastDiagnosticIgnored(
SemaRef.getDiagnostics().isLastDiagnosticIgnored())
{
if (!SemaRef.isSFINAEContext())
SemaRef.InNonInstantiationSFINAEContext = true;
SemaRef.AccessCheckingSFINAE = AccessCheckingSFINAE;
}
~SFINAETrap() {
SemaRef.NumSFINAEErrors = PrevSFINAEErrors;
SemaRef.InNonInstantiationSFINAEContext
= PrevInNonInstantiationSFINAEContext;
SemaRef.AccessCheckingSFINAE = PrevAccessCheckingSFINAE;
SemaRef.getDiagnostics().setLastDiagnosticIgnored(
PrevLastDiagnosticIgnored);
}
/// Determine whether any SFINAE errors have been trapped.
bool hasErrorOccurred() const {
return SemaRef.NumSFINAEErrors > PrevSFINAEErrors;
}
};
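// Example usage (illustrative sketch): wrap a substitution that may
// legitimately fail so its diagnostics are captured rather than emitted.
//
//   Sema::SFINAETrap Trap(S);
//   QualType Substituted = S.SubstType(T, TemplateArgs, Loc, Entity);
//   if (Substituted.isNull() || Trap.hasErrorOccurred())
//     ; // treat as a substitution failure instead of a hard error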
/// RAII class used to indicate that we are performing provisional
/// semantic analysis to determine the validity of a construct, so
/// typo-correction and diagnostics in the immediate context (not within
/// implicitly-instantiated templates) should be suppressed.
class TentativeAnalysisScope {
Sema &SemaRef;
// FIXME: Using a SFINAETrap for this is a hack.
SFINAETrap Trap;
bool PrevDisableTypoCorrection;
public:
explicit TentativeAnalysisScope(Sema &SemaRef)
: SemaRef(SemaRef), Trap(SemaRef, true),
PrevDisableTypoCorrection(SemaRef.DisableTypoCorrection) {
SemaRef.DisableTypoCorrection = true;
}
~TentativeAnalysisScope() {
SemaRef.DisableTypoCorrection = PrevDisableTypoCorrection;
}
};
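// Illustrative sketch: probe whether a construct would be valid without
// committing to diagnostics or typo-correction (buildSomething is a
// hypothetical provisional analysis):
//
//   {
//     Sema::TentativeAnalysisScope Tentative(S);
//     Result = buildSomething(S, Args);
//   } // typo-correction setting restored on scope exit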
/// The current instantiation scope used to store local
/// variables.
LocalInstantiationScope *CurrentInstantiationScope;
/// Tracks whether we are in a context where typo correction is
/// disabled.
bool DisableTypoCorrection;
/// The number of typos corrected by CorrectTypo.
unsigned TyposCorrected;
typedef llvm::SmallSet<SourceLocation, 2> SrcLocSet;
typedef llvm::DenseMap<IdentifierInfo *, SrcLocSet> IdentifierSourceLocations;
/// A cache containing identifiers for which typo correction failed and
/// their locations, so that repeated attempts to correct an identifier in a
/// given location are ignored if typo correction already failed for it.
IdentifierSourceLocations TypoCorrectionFailures;
/// Worker object for performing CFG-based warnings.
sema::AnalysisBasedWarnings AnalysisWarnings;
threadSafety::BeforeSet *ThreadSafetyDeclCache;
/// An entity for which implicit template instantiation is required.
///
/// The source location associated with the declaration is the first place in
/// the source code where the declaration was "used". It is not necessarily
/// the point of instantiation (which will be either before or after the
/// namespace-scope declaration that triggered this implicit instantiation).
/// However, it is the location that diagnostics should generally refer to,
/// because users will need to know what code triggered the instantiation.
typedef std::pair<ValueDecl *, SourceLocation> PendingImplicitInstantiation;
/// The queue of implicit template instantiations that are required
/// but have not yet been performed.
std::deque<PendingImplicitInstantiation> PendingInstantiations;
/// Queue of implicit template instantiations that cannot be performed
/// eagerly.
SmallVector<PendingImplicitInstantiation, 1> LateParsedInstantiations;
class GlobalEagerInstantiationScope {
public:
GlobalEagerInstantiationScope(Sema &S, bool Enabled)
: S(S), Enabled(Enabled) {
if (!Enabled) return;
SavedPendingInstantiations.swap(S.PendingInstantiations);
SavedVTableUses.swap(S.VTableUses);
}
void perform() {
if (Enabled) {
S.DefineUsedVTables();
S.PerformPendingInstantiations();
}
}
~GlobalEagerInstantiationScope() {
if (!Enabled) return;
// Restore the set of pending vtables.
assert(S.VTableUses.empty() &&
"VTableUses should be empty before it is discarded.");
S.VTableUses.swap(SavedVTableUses);
// Restore the set of pending implicit instantiations.
if (S.TUKind != TU_Prefix || !S.LangOpts.PCHInstantiateTemplates) {
assert(S.PendingInstantiations.empty() &&
"PendingInstantiations should be empty before it is discarded.");
S.PendingInstantiations.swap(SavedPendingInstantiations);
} else {
// Template instantiations in the PCH may be delayed until the TU.
S.PendingInstantiations.swap(SavedPendingInstantiations);
S.PendingInstantiations.insert(S.PendingInstantiations.end(),
SavedPendingInstantiations.begin(),
SavedPendingInstantiations.end());
}
}
private:
Sema &S;
SmallVector<VTableUse, 16> SavedVTableUses;
std::deque<PendingImplicitInstantiation> SavedPendingInstantiations;
bool Enabled;
};
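// Example usage (illustrative sketch): collect instantiations triggered while
// handling a construct, then flush them together.
//
//   Sema::GlobalEagerInstantiationScope GlobalInstantiations(S, Enabled);
//   // ... analysis that may enqueue pending instantiations / vtable uses ...
//   GlobalInstantiations.perform(); // define used vtables, run pending work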
/// The queue of implicit template instantiations that are required
/// and must be performed within the current local scope.
///
/// This queue is only used for member functions of local classes in
/// templates, which must be instantiated in the same scope as their
/// enclosing function, so that they can reference function-local
/// types, static variables, enumerators, etc.
std::deque<PendingImplicitInstantiation> PendingLocalImplicitInstantiations;
class LocalEagerInstantiationScope {
public:
LocalEagerInstantiationScope(Sema &S) : S(S) {
SavedPendingLocalImplicitInstantiations.swap(
S.PendingLocalImplicitInstantiations);
}
void perform() { S.PerformPendingInstantiations(/*LocalOnly=*/true); }
~LocalEagerInstantiationScope() {
assert(S.PendingLocalImplicitInstantiations.empty() &&
"there shouldn't be any pending local implicit instantiations");
SavedPendingLocalImplicitInstantiations.swap(
S.PendingLocalImplicitInstantiations);
}
private:
Sema &S;
std::deque<PendingImplicitInstantiation>
SavedPendingLocalImplicitInstantiations;
};
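// Illustrative sketch: while instantiating a function body, queue and then
// perform the instantiations that must happen in the same local scope.
//
//   Sema::LocalEagerInstantiationScope LocalInstantiations(S);
//   // ... instantiate the enclosing function's body ...
//   LocalInstantiations.perform(); // local-class members instantiated here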
/// A helper class for building up ExtParameterInfos.
class ExtParameterInfoBuilder {
SmallVector<FunctionProtoType::ExtParameterInfo, 16> Infos;
bool HasInteresting = false;
public:
/// Set the ExtParameterInfo for the parameter at the given index.
void set(unsigned index, FunctionProtoType::ExtParameterInfo info) {
assert(Infos.size() <= index);
Infos.resize(index);
Infos.push_back(info);
if (!HasInteresting)
HasInteresting = (info != FunctionProtoType::ExtParameterInfo());
}
/// Return a pointer (suitable for setting in an ExtProtoInfo) to the
/// ExtParameterInfo array we've built up.
const FunctionProtoType::ExtParameterInfo *
getPointerOrNull(unsigned numParams) {
if (!HasInteresting) return nullptr;
Infos.resize(numParams);
return Infos.data();
}
};
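// Example usage (illustrative sketch; assumes a FunctionProtoType *Proto with
// N parameters and an ExtProtoInfo EPI being built):
//
//   Sema::ExtParameterInfoBuilder ParamInfos;
//   for (unsigned I = 0; I != N; ++I)
//     ParamInfos.set(I, Proto->getExtParameterInfo(I));
//   EPI.ExtParameterInfos = ParamInfos.getPointerOrNull(N); // null if trivial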
void PerformPendingInstantiations(bool LocalOnly = false);
TypeSourceInfo *SubstType(TypeSourceInfo *T,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc, DeclarationName Entity,
bool AllowDeducedTST = false);
QualType SubstType(QualType T,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc, DeclarationName Entity);
TypeSourceInfo *SubstType(TypeLoc TL,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc, DeclarationName Entity);
TypeSourceInfo *SubstFunctionDeclType(TypeSourceInfo *T,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc,
DeclarationName Entity,
CXXRecordDecl *ThisContext,
Qualifiers ThisTypeQuals);
void SubstExceptionSpec(FunctionDecl *New, const FunctionProtoType *Proto,
const MultiLevelTemplateArgumentList &Args);
bool SubstExceptionSpec(SourceLocation Loc,
FunctionProtoType::ExceptionSpecInfo &ESI,
SmallVectorImpl<QualType> &ExceptionStorage,
const MultiLevelTemplateArgumentList &Args);
ParmVarDecl *SubstParmVarDecl(ParmVarDecl *D,
const MultiLevelTemplateArgumentList &TemplateArgs,
int indexAdjustment,
Optional<unsigned> NumExpansions,
bool ExpectParameterPack);
bool SubstParmTypes(SourceLocation Loc, ArrayRef<ParmVarDecl *> Params,
const FunctionProtoType::ExtParameterInfo *ExtParamInfos,
const MultiLevelTemplateArgumentList &TemplateArgs,
SmallVectorImpl<QualType> &ParamTypes,
SmallVectorImpl<ParmVarDecl *> *OutParams,
ExtParameterInfoBuilder &ParamInfos);
ExprResult SubstExpr(Expr *E,
const MultiLevelTemplateArgumentList &TemplateArgs);
/// Substitute the given template arguments into a list of
/// expressions, expanding pack expansions if required.
///
/// \param Exprs The list of expressions to substitute into.
///
/// \param IsCall Whether this is some form of call, in which case
/// default arguments will be dropped.
///
/// \param TemplateArgs The set of template arguments to substitute.
///
/// \param Outputs Will receive all of the substituted arguments.
///
/// \returns true if an error occurred, false otherwise.
bool SubstExprs(ArrayRef<Expr *> Exprs, bool IsCall,
const MultiLevelTemplateArgumentList &TemplateArgs,
SmallVectorImpl<Expr *> &Outputs);
StmtResult SubstStmt(Stmt *S,
const MultiLevelTemplateArgumentList &TemplateArgs);
TemplateParameterList *
SubstTemplateParams(TemplateParameterList *Params, DeclContext *Owner,
const MultiLevelTemplateArgumentList &TemplateArgs);
bool
SubstTemplateArguments(ArrayRef<TemplateArgumentLoc> Args,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateArgumentListInfo &Outputs);
Decl *SubstDecl(Decl *D, DeclContext *Owner,
const MultiLevelTemplateArgumentList &TemplateArgs);
/// Substitute the name and return type of a defaulted 'operator<=>' to form
/// an implicit 'operator=='.
FunctionDecl *SubstSpaceshipAsEqualEqual(CXXRecordDecl *RD,
FunctionDecl *Spaceship);
ExprResult SubstInitializer(Expr *E,
const MultiLevelTemplateArgumentList &TemplateArgs,
bool CXXDirectInit);
bool
SubstBaseSpecifiers(CXXRecordDecl *Instantiation,
CXXRecordDecl *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs);
bool
InstantiateClass(SourceLocation PointOfInstantiation,
CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK,
bool Complain = true);
bool InstantiateEnum(SourceLocation PointOfInstantiation,
EnumDecl *Instantiation, EnumDecl *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK);
bool InstantiateInClassInitializer(
SourceLocation PointOfInstantiation, FieldDecl *Instantiation,
FieldDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs);
struct LateInstantiatedAttribute {
const Attr *TmplAttr;
LocalInstantiationScope *Scope;
Decl *NewDecl;
LateInstantiatedAttribute(const Attr *A, LocalInstantiationScope *S,
Decl *D)
: TmplAttr(A), Scope(S), NewDecl(D)
{ }
};
typedef SmallVector<LateInstantiatedAttribute, 16> LateInstantiatedAttrVec;
void InstantiateAttrs(const MultiLevelTemplateArgumentList &TemplateArgs,
const Decl *Pattern, Decl *Inst,
LateInstantiatedAttrVec *LateAttrs = nullptr,
LocalInstantiationScope *OuterMostScope = nullptr);
void
InstantiateAttrsForDecl(const MultiLevelTemplateArgumentList &TemplateArgs,
const Decl *Pattern, Decl *Inst,
LateInstantiatedAttrVec *LateAttrs = nullptr,
LocalInstantiationScope *OuterMostScope = nullptr);
void InstantiateDefaultCtorDefaultArgs(CXXConstructorDecl *Ctor);
bool usesPartialOrExplicitSpecialization(
SourceLocation Loc, ClassTemplateSpecializationDecl *ClassTemplateSpec);
bool
InstantiateClassTemplateSpecialization(SourceLocation PointOfInstantiation,
ClassTemplateSpecializationDecl *ClassTemplateSpec,
TemplateSpecializationKind TSK,
bool Complain = true);
void InstantiateClassMembers(SourceLocation PointOfInstantiation,
CXXRecordDecl *Instantiation,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK);
void InstantiateClassTemplateSpecializationMembers(
SourceLocation PointOfInstantiation,
ClassTemplateSpecializationDecl *ClassTemplateSpec,
TemplateSpecializationKind TSK);
NestedNameSpecifierLoc
SubstNestedNameSpecifierLoc(NestedNameSpecifierLoc NNS,
const MultiLevelTemplateArgumentList &TemplateArgs);
DeclarationNameInfo
SubstDeclarationNameInfo(const DeclarationNameInfo &NameInfo,
const MultiLevelTemplateArgumentList &TemplateArgs);
TemplateName
SubstTemplateName(NestedNameSpecifierLoc QualifierLoc, TemplateName Name,
SourceLocation Loc,
const MultiLevelTemplateArgumentList &TemplateArgs);
bool SubstTypeConstraint(TemplateTypeParmDecl *Inst, const TypeConstraint *TC,
const MultiLevelTemplateArgumentList &TemplateArgs);
bool InstantiateDefaultArgument(SourceLocation CallLoc, FunctionDecl *FD,
ParmVarDecl *Param);
void InstantiateExceptionSpec(SourceLocation PointOfInstantiation,
FunctionDecl *Function);
bool CheckInstantiatedFunctionTemplateConstraints(
SourceLocation PointOfInstantiation, FunctionDecl *Decl,
ArrayRef<TemplateArgument> TemplateArgs,
ConstraintSatisfaction &Satisfaction);
FunctionDecl *InstantiateFunctionDeclaration(FunctionTemplateDecl *FTD,
const TemplateArgumentList *Args,
SourceLocation Loc);
void InstantiateFunctionDefinition(SourceLocation PointOfInstantiation,
FunctionDecl *Function,
bool Recursive = false,
bool DefinitionRequired = false,
bool AtEndOfTU = false);
VarTemplateSpecializationDecl *BuildVarTemplateInstantiation(
VarTemplateDecl *VarTemplate, VarDecl *FromVar,
const TemplateArgumentList &TemplateArgList,
const TemplateArgumentListInfo &TemplateArgsInfo,
SmallVectorImpl<TemplateArgument> &Converted,
SourceLocation PointOfInstantiation,
LateInstantiatedAttrVec *LateAttrs = nullptr,
LocalInstantiationScope *StartingScope = nullptr);
VarTemplateSpecializationDecl *CompleteVarTemplateSpecializationDecl(
VarTemplateSpecializationDecl *VarSpec, VarDecl *PatternDecl,
const MultiLevelTemplateArgumentList &TemplateArgs);
void
BuildVariableInstantiation(VarDecl *NewVar, VarDecl *OldVar,
const MultiLevelTemplateArgumentList &TemplateArgs,
LateInstantiatedAttrVec *LateAttrs,
DeclContext *Owner,
LocalInstantiationScope *StartingScope,
bool InstantiatingVarTemplate = false,
VarTemplateSpecializationDecl *PrevVTSD = nullptr);
void InstantiateVariableInitializer(
VarDecl *Var, VarDecl *OldVar,
const MultiLevelTemplateArgumentList &TemplateArgs);
void InstantiateVariableDefinition(SourceLocation PointOfInstantiation,
VarDecl *Var, bool Recursive = false,
bool DefinitionRequired = false,
bool AtEndOfTU = false);
void InstantiateMemInitializers(CXXConstructorDecl *New,
const CXXConstructorDecl *Tmpl,
const MultiLevelTemplateArgumentList &TemplateArgs);
NamedDecl *FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D,
const MultiLevelTemplateArgumentList &TemplateArgs,
bool FindingInstantiatedContext = false);
DeclContext *FindInstantiatedContext(SourceLocation Loc, DeclContext *DC,
const MultiLevelTemplateArgumentList &TemplateArgs);
// Objective-C declarations.
enum ObjCContainerKind {
OCK_None = -1,
OCK_Interface = 0,
OCK_Protocol,
OCK_Category,
OCK_ClassExtension,
OCK_Implementation,
OCK_CategoryImplementation
};
ObjCContainerKind getObjCContainerKind() const;
DeclResult actOnObjCTypeParam(Scope *S,
ObjCTypeParamVariance variance,
SourceLocation varianceLoc,
unsigned index,
IdentifierInfo *paramName,
SourceLocation paramLoc,
SourceLocation colonLoc,
ParsedType typeBound);
ObjCTypeParamList *actOnObjCTypeParamList(Scope *S, SourceLocation lAngleLoc,
ArrayRef<Decl *> typeParams,
SourceLocation rAngleLoc);
void popObjCTypeParamList(Scope *S, ObjCTypeParamList *typeParamList);
Decl *ActOnStartClassInterface(
Scope *S, SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName,
SourceLocation ClassLoc, ObjCTypeParamList *typeParamList,
IdentifierInfo *SuperName, SourceLocation SuperLoc,
ArrayRef<ParsedType> SuperTypeArgs, SourceRange SuperTypeArgsRange,
Decl *const *ProtoRefs, unsigned NumProtoRefs,
const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc,
const ParsedAttributesView &AttrList);
void ActOnSuperClassOfClassInterface(Scope *S,
SourceLocation AtInterfaceLoc,
ObjCInterfaceDecl *IDecl,
IdentifierInfo *ClassName,
SourceLocation ClassLoc,
IdentifierInfo *SuperName,
SourceLocation SuperLoc,
ArrayRef<ParsedType> SuperTypeArgs,
SourceRange SuperTypeArgsRange);
void ActOnTypedefedProtocols(SmallVectorImpl<Decl *> &ProtocolRefs,
SmallVectorImpl<SourceLocation> &ProtocolLocs,
IdentifierInfo *SuperName,
SourceLocation SuperLoc);
Decl *ActOnCompatibilityAlias(
SourceLocation AtCompatibilityAliasLoc,
IdentifierInfo *AliasName, SourceLocation AliasLocation,
IdentifierInfo *ClassName, SourceLocation ClassLocation);
bool CheckForwardProtocolDeclarationForCircularDependency(
IdentifierInfo *PName,
SourceLocation &PLoc, SourceLocation PrevLoc,
const ObjCList<ObjCProtocolDecl> &PList);
Decl *ActOnStartProtocolInterface(
SourceLocation AtProtoInterfaceLoc, IdentifierInfo *ProtocolName,
SourceLocation ProtocolLoc, Decl *const *ProtoRefNames,
unsigned NumProtoRefs, const SourceLocation *ProtoLocs,
SourceLocation EndProtoLoc, const ParsedAttributesView &AttrList);
Decl *ActOnStartCategoryInterface(
SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName,
SourceLocation ClassLoc, ObjCTypeParamList *typeParamList,
IdentifierInfo *CategoryName, SourceLocation CategoryLoc,
Decl *const *ProtoRefs, unsigned NumProtoRefs,
const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc,
const ParsedAttributesView &AttrList);
Decl *ActOnStartClassImplementation(SourceLocation AtClassImplLoc,
IdentifierInfo *ClassName,
SourceLocation ClassLoc,
IdentifierInfo *SuperClassname,
SourceLocation SuperClassLoc,
const ParsedAttributesView &AttrList);
Decl *ActOnStartCategoryImplementation(SourceLocation AtCatImplLoc,
IdentifierInfo *ClassName,
SourceLocation ClassLoc,
IdentifierInfo *CatName,
SourceLocation CatLoc,
const ParsedAttributesView &AttrList);
DeclGroupPtrTy ActOnFinishObjCImplementation(Decl *ObjCImpDecl,
ArrayRef<Decl *> Decls);
DeclGroupPtrTy ActOnForwardClassDeclaration(SourceLocation Loc,
IdentifierInfo **IdentList,
SourceLocation *IdentLocs,
ArrayRef<ObjCTypeParamList *> TypeParamLists,
unsigned NumElts);
DeclGroupPtrTy
ActOnForwardProtocolDeclaration(SourceLocation AtProtocolLoc,
ArrayRef<IdentifierLocPair> IdentList,
const ParsedAttributesView &attrList);
void FindProtocolDeclaration(bool WarnOnDeclarations, bool ForObjCContainer,
ArrayRef<IdentifierLocPair> ProtocolId,
SmallVectorImpl<Decl *> &Protocols);
void DiagnoseTypeArgsAndProtocols(IdentifierInfo *ProtocolId,
SourceLocation ProtocolLoc,
IdentifierInfo *TypeArgId,
SourceLocation TypeArgLoc,
bool SelectProtocolFirst = false);
/// Given a list of identifiers (and their locations), resolve the
/// names to either Objective-C protocol qualifiers or type
/// arguments, as appropriate.
void actOnObjCTypeArgsOrProtocolQualifiers(
Scope *S,
ParsedType baseType,
SourceLocation lAngleLoc,
ArrayRef<IdentifierInfo *> identifiers,
ArrayRef<SourceLocation> identifierLocs,
SourceLocation rAngleLoc,
SourceLocation &typeArgsLAngleLoc,
SmallVectorImpl<ParsedType> &typeArgs,
SourceLocation &typeArgsRAngleLoc,
SourceLocation &protocolLAngleLoc,
SmallVectorImpl<Decl *> &protocols,
SourceLocation &protocolRAngleLoc,
bool warnOnIncompleteProtocols);
/// Build an Objective-C protocol-qualified 'id' type where no
/// base type was specified.
TypeResult actOnObjCProtocolQualifierType(
SourceLocation lAngleLoc,
ArrayRef<Decl *> protocols,
ArrayRef<SourceLocation> protocolLocs,
SourceLocation rAngleLoc);
/// Build a specialized and/or protocol-qualified Objective-C type.
TypeResult actOnObjCTypeArgsAndProtocolQualifiers(
Scope *S,
SourceLocation Loc,
ParsedType BaseType,
SourceLocation TypeArgsLAngleLoc,
ArrayRef<ParsedType> TypeArgs,
SourceLocation TypeArgsRAngleLoc,
SourceLocation ProtocolLAngleLoc,
ArrayRef<Decl *> Protocols,
ArrayRef<SourceLocation> ProtocolLocs,
SourceLocation ProtocolRAngleLoc);
/// Build an Objective-C type parameter type.
QualType BuildObjCTypeParamType(const ObjCTypeParamDecl *Decl,
SourceLocation ProtocolLAngleLoc,
ArrayRef<ObjCProtocolDecl *> Protocols,
ArrayRef<SourceLocation> ProtocolLocs,
SourceLocation ProtocolRAngleLoc,
bool FailOnError = false);
/// Build an Objective-C object pointer type.
QualType BuildObjCObjectType(QualType BaseType,
SourceLocation Loc,
SourceLocation TypeArgsLAngleLoc,
ArrayRef<TypeSourceInfo *> TypeArgs,
SourceLocation TypeArgsRAngleLoc,
SourceLocation ProtocolLAngleLoc,
ArrayRef<ObjCProtocolDecl *> Protocols,
ArrayRef<SourceLocation> ProtocolLocs,
SourceLocation ProtocolRAngleLoc,
bool FailOnError = false);
/// Ensure attributes are consistent with type.
/// \param [in, out] Attributes The attributes to check; they will
/// be modified to be consistent with \p PropertyPtrTy.
void CheckObjCPropertyAttributes(Decl *PropertyPtrTy,
SourceLocation Loc,
unsigned &Attributes,
bool propertyInPrimaryClass);
/// Process the specified property declaration and create decls for the
/// setters and getters as needed.
/// \param property The property declaration being processed
void ProcessPropertyDecl(ObjCPropertyDecl *property);
void DiagnosePropertyMismatch(ObjCPropertyDecl *Property,
ObjCPropertyDecl *SuperProperty,
const IdentifierInfo *Name,
bool OverridingProtocolProperty);
void DiagnoseClassExtensionDupMethods(ObjCCategoryDecl *CAT,
ObjCInterfaceDecl *ID);
Decl *ActOnAtEnd(Scope *S, SourceRange AtEnd,
ArrayRef<Decl *> allMethods = None,
ArrayRef<DeclGroupPtrTy> allTUVars = None);
Decl *ActOnProperty(Scope *S, SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD, ObjCDeclSpec &ODS,
Selector GetterSel, Selector SetterSel,
tok::ObjCKeywordKind MethodImplKind,
DeclContext *lexicalDC = nullptr);
Decl *ActOnPropertyImplDecl(Scope *S,
SourceLocation AtLoc,
SourceLocation PropertyLoc,
bool ImplKind,
IdentifierInfo *PropertyId,
IdentifierInfo *PropertyIvar,
SourceLocation PropertyIvarLoc,
ObjCPropertyQueryKind QueryKind);
enum ObjCSpecialMethodKind {
OSMK_None,
OSMK_Alloc,
OSMK_New,
OSMK_Copy,
OSMK_RetainingInit,
OSMK_NonRetainingInit
};
struct ObjCArgInfo {
IdentifierInfo *Name;
SourceLocation NameLoc;
// The Type is null if no type was specified, and the DeclSpec is invalid
// in this case.
ParsedType Type;
ObjCDeclSpec DeclSpec;
/// ArgAttrs - Attribute list for this argument.
ParsedAttributesView ArgAttrs;
};
Decl *ActOnMethodDeclaration(
Scope *S,
SourceLocation BeginLoc, // location of the + or -.
SourceLocation EndLoc, // location of the ; or {.
tok::TokenKind MethodType, ObjCDeclSpec &ReturnQT, ParsedType ReturnType,
ArrayRef<SourceLocation> SelectorLocs, Selector Sel,
// Optional arguments. The number of types/arguments is obtained
// from Sel.getNumArgs().
ObjCArgInfo *ArgInfo, DeclaratorChunk::ParamInfo *CParamInfo,
unsigned CNumArgs, // c-style args
const ParsedAttributesView &AttrList, tok::ObjCKeywordKind MethodImplKind,
bool isVariadic, bool MethodDefinition);
ObjCMethodDecl *LookupMethodInQualifiedType(Selector Sel,
const ObjCObjectPointerType *OPT,
bool IsInstance);
ObjCMethodDecl *LookupMethodInObjectType(Selector Sel, QualType Ty,
bool IsInstance);
bool CheckARCMethodDecl(ObjCMethodDecl *method);
bool inferObjCARCLifetime(ValueDecl *decl);
void deduceOpenCLAddressSpace(ValueDecl *decl);
ExprResult
HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT,
Expr *BaseExpr,
SourceLocation OpLoc,
DeclarationName MemberName,
SourceLocation MemberLoc,
SourceLocation SuperLoc, QualType SuperType,
bool Super);
ExprResult
ActOnClassPropertyRefExpr(IdentifierInfo &receiverName,
IdentifierInfo &propertyName,
SourceLocation receiverNameLoc,
SourceLocation propertyNameLoc);
ObjCMethodDecl *tryCaptureObjCSelf(SourceLocation Loc);
/// Describes the kind of message expression indicated by a message
/// send that starts with an identifier.
enum ObjCMessageKind {
/// The message is sent to 'super'.
ObjCSuperMessage,
/// The message is an instance message.
ObjCInstanceMessage,
/// The message is a class message, and the identifier is a type
/// name.
ObjCClassMessage
};
ObjCMessageKind getObjCMessageKind(Scope *S,
IdentifierInfo *Name,
SourceLocation NameLoc,
bool IsSuper,
bool HasTrailingDot,
ParsedType &ReceiverType);
ExprResult ActOnSuperMessage(Scope *S, SourceLocation SuperLoc,
Selector Sel,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args);
ExprResult BuildClassMessage(TypeSourceInfo *ReceiverTypeInfo,
QualType ReceiverType,
SourceLocation SuperLoc,
Selector Sel,
ObjCMethodDecl *Method,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args,
bool isImplicit = false);
ExprResult BuildClassMessageImplicit(QualType ReceiverType,
bool isSuperReceiver,
SourceLocation Loc,
Selector Sel,
ObjCMethodDecl *Method,
MultiExprArg Args);
ExprResult ActOnClassMessage(Scope *S,
ParsedType Receiver,
Selector Sel,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args);
ExprResult BuildInstanceMessage(Expr *Receiver,
QualType ReceiverType,
SourceLocation SuperLoc,
Selector Sel,
ObjCMethodDecl *Method,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args,
bool isImplicit = false);
ExprResult BuildInstanceMessageImplicit(Expr *Receiver,
QualType ReceiverType,
SourceLocation Loc,
Selector Sel,
ObjCMethodDecl *Method,
MultiExprArg Args);
ExprResult ActOnInstanceMessage(Scope *S,
Expr *Receiver,
Selector Sel,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args);
ExprResult BuildObjCBridgedCast(SourceLocation LParenLoc,
ObjCBridgeCastKind Kind,
SourceLocation BridgeKeywordLoc,
TypeSourceInfo *TSInfo,
Expr *SubExpr);
ExprResult ActOnObjCBridgedCast(Scope *S,
SourceLocation LParenLoc,
ObjCBridgeCastKind Kind,
SourceLocation BridgeKeywordLoc,
ParsedType Type,
SourceLocation RParenLoc,
Expr *SubExpr);
void CheckTollFreeBridgeCast(QualType castType, Expr *castExpr);
void CheckObjCBridgeRelatedCast(QualType castType, Expr *castExpr);
bool CheckTollFreeBridgeStaticCast(QualType castType, Expr *castExpr,
CastKind &Kind);
bool checkObjCBridgeRelatedComponents(SourceLocation Loc,
QualType DestType, QualType SrcType,
ObjCInterfaceDecl *&RelatedClass,
ObjCMethodDecl *&ClassMethod,
ObjCMethodDecl *&InstanceMethod,
TypedefNameDecl *&TDNDecl,
bool CfToNs, bool Diagnose = true);
bool CheckObjCBridgeRelatedConversions(SourceLocation Loc,
QualType DestType, QualType SrcType,
Expr *&SrcExpr, bool Diagnose = true);
bool CheckConversionToObjCLiteral(QualType DstType, Expr *&SrcExpr,
bool Diagnose = true);
bool checkInitMethod(ObjCMethodDecl *method, QualType receiverTypeIfCall);
/// Check whether the given new method is a valid override of the
/// given overridden method, and set any properties that should be inherited.
void CheckObjCMethodOverride(ObjCMethodDecl *NewMethod,
const ObjCMethodDecl *Overridden);
/// Describes the compatibility of a result type with its method.
enum ResultTypeCompatibilityKind {
RTC_Compatible,
RTC_Incompatible,
RTC_Unknown
};
void CheckObjCMethodDirectOverrides(ObjCMethodDecl *method,
ObjCMethodDecl *overridden);
void CheckObjCMethodOverrides(ObjCMethodDecl *ObjCMethod,
ObjCInterfaceDecl *CurrentClass,
ResultTypeCompatibilityKind RTC);
enum PragmaOptionsAlignKind {
POAK_Native, // #pragma options align=native
POAK_Natural, // #pragma options align=natural
POAK_Packed, // #pragma options align=packed
POAK_Power, // #pragma options align=power
POAK_Mac68k, // #pragma options align=mac68k
POAK_Reset // #pragma options align=reset
};
/// ActOnPragmaClangSection - Called on well formed \#pragma clang section
void ActOnPragmaClangSection(SourceLocation PragmaLoc,
PragmaClangSectionAction Action,
PragmaClangSectionKind SecKind, StringRef SecName);
/// ActOnPragmaOptionsAlign - Called on well formed \#pragma options align.
void ActOnPragmaOptionsAlign(PragmaOptionsAlignKind Kind,
SourceLocation PragmaLoc);
/// ActOnPragmaPack - Called on well formed \#pragma pack(...).
void ActOnPragmaPack(SourceLocation PragmaLoc, PragmaMsStackAction Action,
StringRef SlotLabel, Expr *Alignment);
enum class PragmaAlignPackDiagnoseKind {
NonDefaultStateAtInclude,
ChangedStateAtExit
};
void DiagnoseNonDefaultPragmaAlignPack(PragmaAlignPackDiagnoseKind Kind,
SourceLocation IncludeLoc);
void DiagnoseUnterminatedPragmaAlignPack();
/// ActOnPragmaMSStruct - Called on well formed \#pragma ms_struct [on|off].
void ActOnPragmaMSStruct(PragmaMSStructKind Kind);
/// ActOnPragmaMSComment - Called on well formed
/// \#pragma comment(kind, "arg").
void ActOnPragmaMSComment(SourceLocation CommentLoc, PragmaMSCommentKind Kind,
StringRef Arg);
/// ActOnPragmaMSPointersToMembers - called on well formed \#pragma
/// pointers_to_members(representation method[, general purpose
/// representation]).
void ActOnPragmaMSPointersToMembers(
LangOptions::PragmaMSPointersToMembersKind Kind,
SourceLocation PragmaLoc);
/// Called on well formed \#pragma vtordisp().
void ActOnPragmaMSVtorDisp(PragmaMsStackAction Action,
SourceLocation PragmaLoc,
MSVtorDispMode Value);
enum PragmaSectionKind {
PSK_DataSeg,
PSK_BSSSeg,
PSK_ConstSeg,
PSK_CodeSeg,
};
bool UnifySection(StringRef SectionName, int SectionFlags,
NamedDecl *TheDecl);
bool UnifySection(StringRef SectionName,
int SectionFlags,
SourceLocation PragmaSectionLocation);
/// Called on well formed \#pragma bss_seg/data_seg/const_seg/code_seg.
void ActOnPragmaMSSeg(SourceLocation PragmaLocation,
PragmaMsStackAction Action,
llvm::StringRef StackSlotLabel,
StringLiteral *SegmentName,
llvm::StringRef PragmaName);
/// Called on well formed \#pragma section().
void ActOnPragmaMSSection(SourceLocation PragmaLocation,
int SectionFlags, StringLiteral *SegmentName);
/// Called on well-formed \#pragma init_seg().
void ActOnPragmaMSInitSeg(SourceLocation PragmaLocation,
StringLiteral *SegmentName);
/// Called on #pragma clang __debug dump II
void ActOnPragmaDump(Scope *S, SourceLocation Loc, IdentifierInfo *II);
/// ActOnPragmaDetectMismatch - Called on well-formed \#pragma detect_mismatch
void ActOnPragmaDetectMismatch(SourceLocation Loc, StringRef Name,
StringRef Value);
/// Are precise floating point semantics currently enabled?
bool isPreciseFPEnabled() {
return !CurFPFeatures.getAllowFPReassociate() &&
!CurFPFeatures.getNoSignedZero() &&
!CurFPFeatures.getAllowReciprocal() &&
!CurFPFeatures.getAllowApproxFunc();
}
/// ActOnPragmaFloatControl - Called on well-formed \#pragma float_control
void ActOnPragmaFloatControl(SourceLocation Loc, PragmaMsStackAction Action,
PragmaFloatControlKind Value);
/// ActOnPragmaUnused - Called on well-formed '\#pragma unused'.
void ActOnPragmaUnused(const Token &Identifier,
Scope *curScope,
SourceLocation PragmaLoc);
/// ActOnPragmaVisibility - Called on well formed \#pragma GCC visibility... .
void ActOnPragmaVisibility(const IdentifierInfo* VisType,
SourceLocation PragmaLoc);
NamedDecl *DeclClonePragmaWeak(NamedDecl *ND, IdentifierInfo *II,
SourceLocation Loc);
void DeclApplyPragmaWeak(Scope *S, NamedDecl *ND, WeakInfo &W);
/// ActOnPragmaWeakID - Called on well formed \#pragma weak ident.
void ActOnPragmaWeakID(IdentifierInfo* WeakName,
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc);
/// ActOnPragmaRedefineExtname - Called on well formed
/// \#pragma redefine_extname oldname newname.
void ActOnPragmaRedefineExtname(IdentifierInfo* WeakName,
IdentifierInfo* AliasName,
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc,
SourceLocation AliasNameLoc);
/// ActOnPragmaWeakAlias - Called on well formed \#pragma weak ident = ident.
void ActOnPragmaWeakAlias(IdentifierInfo* WeakName,
IdentifierInfo* AliasName,
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc,
SourceLocation AliasNameLoc);
/// ActOnPragmaFPContract - Called on well formed
/// \#pragma {STDC,OPENCL} FP_CONTRACT and
/// \#pragma clang fp contract
void ActOnPragmaFPContract(SourceLocation Loc, LangOptions::FPModeKind FPC);
/// Called on well formed
/// \#pragma clang fp reassociate
void ActOnPragmaFPReassociate(SourceLocation Loc, bool IsEnabled);
/// ActOnPragmaFEnvAccess - Called on well formed
/// \#pragma STDC FENV_ACCESS
void ActOnPragmaFEnvAccess(SourceLocation Loc, bool IsEnabled);
/// Called on well formed '\#pragma clang fp' that has option 'exceptions'.
void ActOnPragmaFPExceptions(SourceLocation Loc,
LangOptions::FPExceptionModeKind);
/// Called to set constant rounding mode for floating point operations.
void setRoundingMode(SourceLocation Loc, llvm::RoundingMode);
/// Called to set exception behavior for floating point operations.
void setExceptionMode(SourceLocation Loc, LangOptions::FPExceptionModeKind);
/// AddAlignmentAttributesForRecord - Adds any needed alignment attributes to
/// the record decl, to handle '\#pragma pack' and '\#pragma options align'.
void AddAlignmentAttributesForRecord(RecordDecl *RD);
/// AddMsStructLayoutForRecord - Adds ms_struct layout attribute to record.
void AddMsStructLayoutForRecord(RecordDecl *RD);
/// PushNamespaceVisibilityAttr - Note that we've entered a
/// namespace with a visibility attribute.
void PushNamespaceVisibilityAttr(const VisibilityAttr *Attr,
SourceLocation Loc);
/// AddPushedVisibilityAttribute - If '\#pragma GCC visibility' was used,
/// add an appropriate visibility attribute.
void AddPushedVisibilityAttribute(Decl *RD);
/// PopPragmaVisibility - Pop the top element of the visibility stack; used
/// for '\#pragma GCC visibility' and visibility attributes on namespaces.
void PopPragmaVisibility(bool IsNamespaceEnd, SourceLocation EndLoc);
/// FreeVisContext - Deallocate and null out VisContext.
void FreeVisContext();
/// AddCFAuditedAttribute - Check whether we're currently within
/// '\#pragma clang arc_cf_code_audited' and, if so, consider adding
/// the appropriate attribute.
void AddCFAuditedAttribute(Decl *D);
void ActOnPragmaAttributeAttribute(ParsedAttr &Attribute,
SourceLocation PragmaLoc,
attr::ParsedSubjectMatchRuleSet Rules);
void ActOnPragmaAttributeEmptyPush(SourceLocation PragmaLoc,
const IdentifierInfo *Namespace);
/// Called on well-formed '\#pragma clang attribute pop'.
void ActOnPragmaAttributePop(SourceLocation PragmaLoc,
const IdentifierInfo *Namespace);
/// Adds the attributes that have been specified using the
/// '\#pragma clang attribute push' directives to the given declaration.
void AddPragmaAttributes(Scope *S, Decl *D);
void DiagnoseUnterminatedPragmaAttribute();
/// Called on well formed \#pragma clang optimize.
void ActOnPragmaOptimize(bool On, SourceLocation PragmaLoc);
/// Get the location for the currently active "\#pragma clang optimize
/// off". If this location is invalid, then the state of the pragma is "on".
SourceLocation getOptimizeOffPragmaLocation() const {
return OptimizeOffPragmaLocation;
}
/// Only called on function definitions; if there is a pragma in scope
/// with the effect of a range-based optnone, consider marking the function
/// with attribute optnone.
void AddRangeBasedOptnone(FunctionDecl *FD);
/// Adds the 'optnone' attribute to the function declaration if there
/// are no conflicts; Loc represents the location causing the 'optnone'
/// attribute to be added (usually because of a pragma).
void AddOptnoneAttributeIfNoConflicts(FunctionDecl *FD, SourceLocation Loc);
void AddIntelFPGABankBitsAttr(Decl *D, const AttributeCommonInfo &CI,
Expr **Exprs, unsigned Size);
template <typename AttrType>
void addIntelTripleArgAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *XDimExpr, Expr *YDimExpr, Expr *ZDimExpr);
void AddWorkGroupSizeHintAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *XDim, Expr *YDim, Expr *ZDim);
WorkGroupSizeHintAttr *
MergeWorkGroupSizeHintAttr(Decl *D, const WorkGroupSizeHintAttr &A);
void AddIntelReqdSubGroupSize(Decl *D, const AttributeCommonInfo &CI,
Expr *E);
IntelReqdSubGroupSizeAttr *
MergeIntelReqdSubGroupSizeAttr(Decl *D, const IntelReqdSubGroupSizeAttr &A);
IntelNamedSubGroupSizeAttr *
MergeIntelNamedSubGroupSizeAttr(Decl *D, const IntelNamedSubGroupSizeAttr &A);
void AddSYCLIntelNumSimdWorkItemsAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *E);
SYCLIntelNumSimdWorkItemsAttr *
MergeSYCLIntelNumSimdWorkItemsAttr(Decl *D,
const SYCLIntelNumSimdWorkItemsAttr &A);
void AddSYCLIntelESimdVectorizeAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *E);
SYCLIntelESimdVectorizeAttr *
MergeSYCLIntelESimdVectorizeAttr(Decl *D,
const SYCLIntelESimdVectorizeAttr &A);
void AddSYCLIntelSchedulerTargetFmaxMhzAttr(Decl *D,
const AttributeCommonInfo &CI,
Expr *E);
SYCLIntelSchedulerTargetFmaxMhzAttr *MergeSYCLIntelSchedulerTargetFmaxMhzAttr(
Decl *D, const SYCLIntelSchedulerTargetFmaxMhzAttr &A);
void AddSYCLIntelNoGlobalWorkOffsetAttr(Decl *D,
const AttributeCommonInfo &CI,
Expr *E);
SYCLIntelNoGlobalWorkOffsetAttr *MergeSYCLIntelNoGlobalWorkOffsetAttr(
Decl *D, const SYCLIntelNoGlobalWorkOffsetAttr &A);
void AddSYCLIntelLoopFuseAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *E);
SYCLIntelLoopFuseAttr *
MergeSYCLIntelLoopFuseAttr(Decl *D, const SYCLIntelLoopFuseAttr &A);
void AddIntelFPGAPrivateCopiesAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *E);
void AddIntelFPGAMaxReplicatesAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *E);
IntelFPGAMaxReplicatesAttr *
MergeIntelFPGAMaxReplicatesAttr(Decl *D, const IntelFPGAMaxReplicatesAttr &A);
void AddIntelFPGAForcePow2DepthAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *E);
IntelFPGAForcePow2DepthAttr *
MergeIntelFPGAForcePow2DepthAttr(Decl *D,
const IntelFPGAForcePow2DepthAttr &A);
void AddSYCLIntelFPGAInitiationIntervalAttr(Decl *D,
const AttributeCommonInfo &CI,
Expr *E);
SYCLIntelFPGAInitiationIntervalAttr *MergeSYCLIntelFPGAInitiationIntervalAttr(
Decl *D, const SYCLIntelFPGAInitiationIntervalAttr &A);
SYCLIntelFPGAMaxConcurrencyAttr *MergeSYCLIntelFPGAMaxConcurrencyAttr(
Decl *D, const SYCLIntelFPGAMaxConcurrencyAttr &A);
void AddSYCLIntelMaxGlobalWorkDimAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *E);
SYCLIntelMaxGlobalWorkDimAttr *
MergeSYCLIntelMaxGlobalWorkDimAttr(Decl *D,
const SYCLIntelMaxGlobalWorkDimAttr &A);
void AddIntelFPGABankWidthAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *E);
IntelFPGABankWidthAttr *
MergeIntelFPGABankWidthAttr(Decl *D, const IntelFPGABankWidthAttr &A);
void AddIntelFPGANumBanksAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *E);
IntelFPGANumBanksAttr *
MergeIntelFPGANumBanksAttr(Decl *D, const IntelFPGANumBanksAttr &A);
SYCLDeviceHasAttr *MergeSYCLDeviceHasAttr(Decl *D,
const SYCLDeviceHasAttr &A);
void AddSYCLDeviceHasAttr(Decl *D, const AttributeCommonInfo &CI,
Expr **Exprs, unsigned Size);
SYCLUsesAspectsAttr *MergeSYCLUsesAspectsAttr(Decl *D,
const SYCLUsesAspectsAttr &A);
void AddSYCLUsesAspectsAttr(Decl *D, const AttributeCommonInfo &CI,
Expr **Exprs, unsigned Size);
/// AddAlignedAttr - Adds an aligned attribute to a particular declaration.
void AddAlignedAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E,
bool IsPackExpansion);
void AddAlignedAttr(Decl *D, const AttributeCommonInfo &CI, TypeSourceInfo *T,
bool IsPackExpansion);
/// AddAssumeAlignedAttr - Adds an assume_aligned attribute to a particular
/// declaration.
void AddAssumeAlignedAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E,
Expr *OE);
/// AddAllocAlignAttr - Adds an alloc_align attribute to a particular
/// declaration.
void AddAllocAlignAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *ParamExpr);
/// AddAlignValueAttr - Adds an align_value attribute to a particular
/// declaration.
void AddAlignValueAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E);
/// AddAnnotationAttr - Adds an annotation Annot with Args arguments to D.
void AddAnnotationAttr(Decl *D, const AttributeCommonInfo &CI,
StringRef Annot, MutableArrayRef<Expr *> Args);
/// AddLaunchBoundsAttr - Adds a launch_bounds attribute to a particular
/// declaration.
void AddLaunchBoundsAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *MaxThreads, Expr *MinBlocks);
/// AddModeAttr - Adds a mode attribute to a particular declaration.
void AddModeAttr(Decl *D, const AttributeCommonInfo &CI, IdentifierInfo *Name,
bool InInstantiation = false);
void AddParameterABIAttr(Decl *D, const AttributeCommonInfo &CI,
ParameterABI ABI);
enum class RetainOwnershipKind {NS, CF, OS};
void AddXConsumedAttr(Decl *D, const AttributeCommonInfo &CI,
RetainOwnershipKind K, bool IsTemplateInstantiation);
/// addAMDGPUFlatWorkGroupSizeAttr - Adds an amdgpu_flat_work_group_size
/// attribute to a particular declaration.
void addAMDGPUFlatWorkGroupSizeAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *Min, Expr *Max);
/// addAMDGPUWavesPerEUAttr - Adds an amdgpu_waves_per_eu attribute to a
/// particular declaration.
void addAMDGPUWavesPerEUAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *Min, Expr *Max);
/// addSYCLIntelPipeIOAttr - Adds a pipe I/O attribute to a particular
/// declaration.
void addSYCLIntelPipeIOAttr(Decl *D, const AttributeCommonInfo &CI, Expr *ID);
/// AddSYCLIntelFPGAMaxConcurrencyAttr - Adds a max_concurrency attribute to a
/// particular declaration.
void AddSYCLIntelFPGAMaxConcurrencyAttr(Decl *D,
const AttributeCommonInfo &CI,
Expr *E);
bool checkNSReturnsRetainedReturnType(SourceLocation loc, QualType type);
bool checkAllowedSYCLInitializer(VarDecl *VD);
//===--------------------------------------------------------------------===//
// C++ Coroutines TS
//
bool ActOnCoroutineBodyStart(Scope *S, SourceLocation KwLoc,
StringRef Keyword);
ExprResult ActOnCoawaitExpr(Scope *S, SourceLocation KwLoc, Expr *E);
ExprResult ActOnCoyieldExpr(Scope *S, SourceLocation KwLoc, Expr *E);
StmtResult ActOnCoreturnStmt(Scope *S, SourceLocation KwLoc, Expr *E);
ExprResult BuildResolvedCoawaitExpr(SourceLocation KwLoc, Expr *E,
bool IsImplicit = false);
ExprResult BuildUnresolvedCoawaitExpr(SourceLocation KwLoc, Expr *E,
UnresolvedLookupExpr *Lookup);
ExprResult BuildCoyieldExpr(SourceLocation KwLoc, Expr *E);
StmtResult BuildCoreturnStmt(SourceLocation KwLoc, Expr *E,
bool IsImplicit = false);
StmtResult BuildCoroutineBodyStmt(CoroutineBodyStmt::CtorArgs);
bool buildCoroutineParameterMoves(SourceLocation Loc);
VarDecl *buildCoroutinePromise(SourceLocation Loc);
void CheckCompletedCoroutineBody(FunctionDecl *FD, Stmt *&Body);
/// Look up 'coroutine_traits' in the std and std::experimental namespaces.
/// The namespace found is recorded in \p Namespace.
ClassTemplateDecl *lookupCoroutineTraits(SourceLocation KwLoc,
SourceLocation FuncLoc,
NamespaceDecl *&Namespace);
/// Check that the expression 'co_await promise.final_suspend()' is not
/// potentially-throwing.
bool checkFinalSuspendNoThrow(const Stmt *FinalSuspend);
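// A hedged illustration (names below are made up) of the property
// checkFinalSuspendNoThrow enforces: the promise's final_suspend() and the
// awaiter it returns must be non-throwing.
//
//   struct promise_type {
//     // OK: declared noexcept, so 'co_await promise.final_suspend()' is
//     // not potentially-throwing.
//     std::suspend_always final_suspend() noexcept { return {}; }
//     // A final_suspend() without noexcept would be diagnosed.
//   };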
//===--------------------------------------------------------------------===//
// OpenMP directives and clauses.
//
private:
void *VarDataSharingAttributesStack;
struct DeclareTargetContextInfo {
struct MapInfo {
OMPDeclareTargetDeclAttr::MapTypeTy MT;
SourceLocation Loc;
};
/// Explicitly listed variables and functions in a 'to' or 'link' clause.
llvm::DenseMap<NamedDecl *, MapInfo> ExplicitlyMapped;
/// The 'device_type' as parsed from the clause.
OMPDeclareTargetDeclAttr::DevTypeTy DT = OMPDeclareTargetDeclAttr::DT_Any;
/// The directive kind, `begin declare target` or `declare target`.
OpenMPDirectiveKind Kind;
/// The directive location.
SourceLocation Loc;
DeclareTargetContextInfo(OpenMPDirectiveKind Kind, SourceLocation Loc)
: Kind(Kind), Loc(Loc) {}
};
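// Illustrative only: a directive such as
//   #pragma omp declare target to(x) link(y) device_type(nohost)
// records 'x' and 'y' in ExplicitlyMapped (with the MT_To and MT_Link map
// types) and sets DT to OMPDeclareTargetDeclAttr::DT_NoHost.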
/// Stack of contexts for nested '#pragma omp declare target' directives.
SmallVector<DeclareTargetContextInfo, 4> DeclareTargetNesting;
/// Initialization of data-sharing attributes stack.
void InitDataSharingAttributesStack();
/// Destruction of data-sharing attributes stack.
void DestroyDataSharingAttributesStack();
ExprResult
VerifyPositiveIntegerConstantInClause(Expr *Op, OpenMPClauseKind CKind,
bool StrictlyPositive = true,
bool SuppressExprDiags = false);
/// Returns OpenMP nesting level for current directive.
unsigned getOpenMPNestingLevel() const;
/// Adjusts the function scopes index for the target-based regions.
void adjustOpenMPTargetScopeIndex(unsigned &FunctionScopesIndex,
unsigned Level) const;
/// Returns the number of scopes associated with the construct on the given
/// OpenMP level.
int getNumberOfConstructScopes(unsigned Level) const;
/// Push new OpenMP function region for non-capturing function.
void pushOpenMPFunctionRegion();
/// Pop OpenMP function region for non-capturing function.
void popOpenMPFunctionRegion(const sema::FunctionScopeInfo *OldFSI);
/// Analyzes and checks a loop nest for use by a loop transformation.
///
/// \param Kind The loop transformation directive kind.
/// \param NumLoops How many nested loops the directive is expecting.
/// \param AStmt Associated statement of the transformation directive.
/// \param LoopHelpers [out] The loop analysis result.
/// \param Body [out] The body code nested inside \p NumLoops loops.
/// \param OriginalInits [out] Collection of statements and declarations that
/// must have been executed/declared before entering the
/// loop.
///
/// \return Whether there was any error.
bool checkTransformableLoopNest(
OpenMPDirectiveKind Kind, Stmt *AStmt, int NumLoops,
SmallVectorImpl<OMPLoopBasedDirective::HelperExprs> &LoopHelpers,
Stmt *&Body,
SmallVectorImpl<SmallVector<llvm::PointerUnion<Stmt *, Decl *>, 0>>
&OriginalInits);
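// For example (illustrative), for '#pragma omp tile sizes(4, 8)' the
// directive expects NumLoops == 2, so \p AStmt must be a nest of two
// canonical loops:
//
//   for (int i = 0; i < N; ++i)
//     for (int j = 0; j < M; ++j)
//       body(i, j);   // extracted into \p Body by the analysis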
/// Helper to keep information about the current `omp begin/end declare
/// variant` nesting.
struct OMPDeclareVariantScope {
/// The associated OpenMP context selector.
OMPTraitInfo *TI;
/// The associated OpenMP context selector mangling.
std::string NameSuffix;
OMPDeclareVariantScope(OMPTraitInfo &TI);
};
/// Return the OMPTraitInfo for the surrounding scope, if any.
OMPTraitInfo *getOMPTraitInfoForSurroundingScope() {
return OMPDeclareVariantScopes.empty() ? nullptr
: OMPDeclareVariantScopes.back().TI;
}
/// The current `omp begin/end declare variant` scopes.
SmallVector<OMPDeclareVariantScope, 4> OMPDeclareVariantScopes;
/// The current `omp begin/end assumes` scopes.
SmallVector<AssumptionAttr *, 4> OMPAssumeScoped;
/// All `omp assumes` we encountered so far.
SmallVector<AssumptionAttr *, 4> OMPAssumeGlobal;
public:
/// The declarator \p D defines a function in the scope \p S which is nested
/// in an `omp begin/end declare variant` scope. In this method we create a
/// declaration for \p D and rename \p D according to the OpenMP context
/// selector of the surrounding scope. Return all base functions in \p Bases.
void ActOnStartOfFunctionDefinitionInOpenMPDeclareVariantScope(
Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParameterLists,
SmallVectorImpl<FunctionDecl *> &Bases);
/// Register \p D as specialization of all base functions in \p Bases in the
/// current `omp begin/end declare variant` scope.
void ActOnFinishedFunctionDefinitionInOpenMPDeclareVariantScope(
Decl *D, SmallVectorImpl<FunctionDecl *> &Bases);
/// Act on \p D, a function definition inside an `omp [begin/end] assumes`.
void ActOnFinishedFunctionDefinitionInOpenMPAssumeScope(Decl *D);
/// Check whether we are currently inside an `omp begin/end declare variant`
/// scope and can therefore exit one.
bool isInOpenMPDeclareVariantScope() const {
return !OMPDeclareVariantScopes.empty();
}
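// Sketch of the construct these scopes track (illustrative names):
//
//   #pragma omp begin declare variant match(device = {kind(gpu)})
//   void foo();   // renamed using the context selector's NameSuffix
//   #pragma omp end declare variant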
/// Given the potential call expression \p Call, determine if there is a
/// specialization via the OpenMP declare variant mechanism available. If
/// there is, return the specialized call expression, otherwise return the
/// original \p Call.
ExprResult ActOnOpenMPCall(ExprResult Call, Scope *Scope,
SourceLocation LParenLoc, MultiExprArg ArgExprs,
SourceLocation RParenLoc, Expr *ExecConfig);
/// Handle an `omp begin declare variant`.
void ActOnOpenMPBeginDeclareVariant(SourceLocation Loc, OMPTraitInfo &TI);
/// Handle an `omp end declare variant`.
void ActOnOpenMPEndDeclareVariant();
/// Checks if the variant/multiversion functions are compatible.
bool areMultiversionVariantFunctionsCompatible(
const FunctionDecl *OldFD, const FunctionDecl *NewFD,
const PartialDiagnostic &NoProtoDiagID,
const PartialDiagnosticAt &NoteCausedDiagIDAt,
const PartialDiagnosticAt &NoSupportDiagIDAt,
const PartialDiagnosticAt &DiffDiagIDAt, bool TemplatesSupported,
bool ConstexprSupported, bool CLinkageMayDiffer);
/// Tries to capture a lambda's captured variables in the OpenMP region
/// before the original lambda itself is captured.
void tryCaptureOpenMPLambdas(ValueDecl *V);
/// Return true if the provided declaration \a D should be captured by
/// reference.
/// \param Level Relative level of the nested OpenMP construct for which
/// the check is performed.
/// \param OpenMPCaptureLevel Capture level within an OpenMP construct.
bool isOpenMPCapturedByRef(const ValueDecl *D, unsigned Level,
unsigned OpenMPCaptureLevel) const;
/// Check if the specified variable is used in one of the private
/// clauses (private, firstprivate, lastprivate, reduction etc.) in OpenMP
/// constructs.
VarDecl *isOpenMPCapturedDecl(ValueDecl *D, bool CheckScopeInfo = false,
unsigned StopAt = 0);
ExprResult getOpenMPCapturedExpr(VarDecl *Capture, ExprValueKind VK,
ExprObjectKind OK, SourceLocation Loc);
/// If the current region is a loop-based region, mark the start of the loop
/// construct.
void startOpenMPLoop();
/// If the current region is a range loop-based region, mark the start of the
/// loop construct.
void startOpenMPCXXRangeFor();
/// Check if the specified variable is used in a 'private' clause.
/// \param Level Relative level of the nested OpenMP construct for which
/// the check is performed.
OpenMPClauseKind isOpenMPPrivateDecl(ValueDecl *D, unsigned Level,
unsigned CapLevel) const;
/// Sets OpenMP capture kind (OMPC_private, OMPC_firstprivate, OMPC_map etc.)
/// for \p FD based on DSA for the provided corresponding captured declaration
/// \p D.
void setOpenMPCaptureKind(FieldDecl *FD, const ValueDecl *D, unsigned Level);
/// Check if the specified variable is captured by a 'target' directive.
/// \param Level Relative level of the nested OpenMP construct for which
/// the check is performed.
bool isOpenMPTargetCapturedDecl(const ValueDecl *D, unsigned Level,
unsigned CaptureLevel) const;
/// Check if the specified global variable must be captured by outer capture
/// regions.
/// \param Level Relative level of the nested OpenMP construct for which
/// the check is performed.
bool isOpenMPGlobalCapturedDecl(ValueDecl *D, unsigned Level,
unsigned CaptureLevel) const;
ExprResult PerformOpenMPImplicitIntegerConversion(SourceLocation OpLoc,
Expr *Op);
/// Called on start of new data sharing attribute block.
void StartOpenMPDSABlock(OpenMPDirectiveKind K,
const DeclarationNameInfo &DirName, Scope *CurScope,
SourceLocation Loc);
/// Start analysis of clauses.
void StartOpenMPClause(OpenMPClauseKind K);
/// End analysis of clauses.
void EndOpenMPClause();
/// Called on end of data sharing attribute block.
void EndOpenMPDSABlock(Stmt *CurDirective);
/// Check if the current region is an OpenMP loop region and if it is,
/// mark loop control variable, used in \p Init for loop initialization, as
/// private by default.
/// \param Init First part of the for loop.
void ActOnOpenMPLoopInitialization(SourceLocation ForLoc, Stmt *Init);
/// Called on well-formed '\#pragma omp metadirective' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPMetaDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
// OpenMP directives and clauses.
/// Called on correct id-expression from the '#pragma omp
/// threadprivate'.
ExprResult ActOnOpenMPIdExpression(Scope *CurScope, CXXScopeSpec &ScopeSpec,
const DeclarationNameInfo &Id,
OpenMPDirectiveKind Kind);
/// Called on well-formed '#pragma omp threadprivate'.
DeclGroupPtrTy ActOnOpenMPThreadprivateDirective(
SourceLocation Loc,
ArrayRef<Expr *> VarList);
/// Builds a new OpenMPThreadPrivateDecl and checks its correctness.
OMPThreadPrivateDecl *CheckOMPThreadPrivateDecl(SourceLocation Loc,
ArrayRef<Expr *> VarList);
/// Called on well-formed '#pragma omp allocate'.
DeclGroupPtrTy ActOnOpenMPAllocateDirective(SourceLocation Loc,
ArrayRef<Expr *> VarList,
ArrayRef<OMPClause *> Clauses,
DeclContext *Owner = nullptr);
/// Called on well-formed '#pragma omp [begin] assume[s]'.
void ActOnOpenMPAssumesDirective(SourceLocation Loc,
OpenMPDirectiveKind DKind,
ArrayRef<std::string> Assumptions,
bool SkippedClauses);
/// Check if there is an active `omp begin assumes` directive.
bool isInOpenMPAssumeScope() const { return !OMPAssumeScoped.empty(); }
/// Check if there is an active global `omp assumes` directive.
bool hasGlobalOpenMPAssumes() const { return !OMPAssumeGlobal.empty(); }
/// Called on well-formed '#pragma omp end assumes'.
void ActOnOpenMPEndAssumesDirective();
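// Illustrative forms of the directives tracked above:
//   #pragma omp assumes no_parallelism           // global, OMPAssumeGlobal
//   #pragma omp begin assumes no_openmp_routines // scoped, OMPAssumeScoped
//   ...
//   #pragma omp end assumes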
/// Called on well-formed '#pragma omp requires'.
DeclGroupPtrTy ActOnOpenMPRequiresDirective(SourceLocation Loc,
ArrayRef<OMPClause *> ClauseList);
/// Check restrictions on the 'requires' directive.
OMPRequiresDecl *CheckOMPRequiresDecl(SourceLocation Loc,
ArrayRef<OMPClause *> Clauses);
/// Check if the specified type is allowed to be used in 'omp declare
/// reduction' construct.
QualType ActOnOpenMPDeclareReductionType(SourceLocation TyLoc,
TypeResult ParsedType);
/// Called on start of '#pragma omp declare reduction'.
DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveStart(
Scope *S, DeclContext *DC, DeclarationName Name,
ArrayRef<std::pair<QualType, SourceLocation>> ReductionTypes,
AccessSpecifier AS, Decl *PrevDeclInScope = nullptr);
/// Initialize declare reduction construct combiner.
void ActOnOpenMPDeclareReductionCombinerStart(Scope *S, Decl *D);
/// Finish current declare reduction construct combiner.
void ActOnOpenMPDeclareReductionCombinerEnd(Decl *D, Expr *Combiner);
/// Initialize declare reduction construct initializer.
/// \return omp_priv variable.
VarDecl *ActOnOpenMPDeclareReductionInitializerStart(Scope *S, Decl *D);
/// Finish current declare reduction construct initializer.
void ActOnOpenMPDeclareReductionInitializerEnd(Decl *D, Expr *Initializer,
VarDecl *OmpPrivParm);
/// Called at the end of '#pragma omp declare reduction'.
DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveEnd(
Scope *S, DeclGroupPtrTy DeclReductions, bool IsValid);
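// A sketch of the construct driving the combiner/initializer callbacks
// above (wrapped for readability; a pragma is one logical line). The
// combiner reads omp_in/omp_out; the initializer defines omp_priv:
//
//   #pragma omp declare reduction(merge : std::vector<int> :
//       omp_out.insert(omp_out.end(), omp_in.begin(), omp_in.end()))
//       initializer(omp_priv = std::vector<int>())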
/// Check variable declaration in 'omp declare mapper' construct.
TypeResult ActOnOpenMPDeclareMapperVarDecl(Scope *S, Declarator &D);
/// Check if the specified type is allowed to be used in 'omp declare
/// mapper' construct.
QualType ActOnOpenMPDeclareMapperType(SourceLocation TyLoc,
TypeResult ParsedType);
/// Called on start of '#pragma omp declare mapper'.
DeclGroupPtrTy ActOnOpenMPDeclareMapperDirective(
Scope *S, DeclContext *DC, DeclarationName Name, QualType MapperType,
SourceLocation StartLoc, DeclarationName VN, AccessSpecifier AS,
Expr *MapperVarRef, ArrayRef<OMPClause *> Clauses,
Decl *PrevDeclInScope = nullptr);
/// Build the mapper variable of '#pragma omp declare mapper'.
ExprResult ActOnOpenMPDeclareMapperDirectiveVarDecl(Scope *S,
QualType MapperType,
SourceLocation StartLoc,
DeclarationName VN);
bool isOpenMPDeclareMapperVarDeclAllowed(const VarDecl *VD) const;
const ValueDecl *getOpenMPDeclareMapperVarName() const;
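// Illustrative 'declare mapper' form handled by the callbacks above; 'v'
// is the mapper variable checked by ActOnOpenMPDeclareMapperVarDecl:
//
//   struct S { int len; double *data; };
//   #pragma omp declare mapper(id : S v) map(v.len, v.data[0:v.len])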
/// Called at the start of a declare target region, i.e. '#pragma omp declare target'.
bool ActOnStartOpenMPDeclareTargetContext(DeclareTargetContextInfo &DTCI);
/// Called at the end of a declare target region, i.e. '#pragma omp end declare target'.
const DeclareTargetContextInfo ActOnOpenMPEndDeclareTargetDirective();
/// Called once a target context is completed, which can be when a
/// '#pragma omp end declare target' was encountered or when a
/// '#pragma omp declare target' without declaration-definition-seq was
/// encountered.
void ActOnFinishedOpenMPDeclareTargetContext(DeclareTargetContextInfo &DTCI);
/// Searches for the provided declaration name for OpenMP declare target
/// directive.
NamedDecl *lookupOpenMPDeclareTargetName(Scope *CurScope,
CXXScopeSpec &ScopeSpec,
const DeclarationNameInfo &Id);
/// Called on correct id-expression from the '#pragma omp declare target'.
void ActOnOpenMPDeclareTargetName(NamedDecl *ND, SourceLocation Loc,
OMPDeclareTargetDeclAttr::MapTypeTy MT,
OMPDeclareTargetDeclAttr::DevTypeTy DT);
/// Check declaration inside target region.
void
checkDeclIsAllowedInOpenMPTarget(Expr *E, Decl *D,
SourceLocation IdLoc = SourceLocation());
/// Finishes analysis of the deferred function calls that may be declared as
/// host/nohost during device/host compilation.
void finalizeOpenMPDelayedAnalysis(const FunctionDecl *Caller,
const FunctionDecl *Callee,
SourceLocation Loc);
/// Return true if inside an OpenMP declare target region.
bool isInOpenMPDeclareTargetContext() const {
return !DeclareTargetNesting.empty();
}
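// Illustrative region tracked by DeclareTargetNesting:
//
//   #pragma omp declare target
//   int dev_counter;   // treated as if listed in a 'to' clause
//   #pragma omp end declare target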
/// Return true if inside an OpenMP target execution directive.
bool isInOpenMPTargetExecutionDirective() const;
/// Return the number of captured regions created for an OpenMP directive.
static int getOpenMPCaptureLevels(OpenMPDirectiveKind Kind);
/// Initialization of captured region for OpenMP region.
void ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope);
/// Called for syntactic loops (ForStmt or CXXForRangeStmt) associated with
/// an OpenMP loop directive.
StmtResult ActOnOpenMPCanonicalLoop(Stmt *AStmt);
/// Process a canonical OpenMP loop nest that can either be a canonical
/// literal loop (ForStmt or CXXForRangeStmt), or the generated loop of an
/// OpenMP loop transformation construct.
StmtResult ActOnOpenMPLoopnest(Stmt *AStmt);
/// End of OpenMP region.
///
/// \param S Statement associated with the current OpenMP region.
/// \param Clauses List of clauses for the current OpenMP region.
///
/// \returns Statement for finished OpenMP region.
StmtResult ActOnOpenMPRegionEnd(StmtResult S, ArrayRef<OMPClause *> Clauses);
StmtResult ActOnOpenMPExecutableDirective(
OpenMPDirectiveKind Kind, const DeclarationNameInfo &DirName,
OpenMPDirectiveKind CancelRegion, ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp parallel' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPParallelDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
using VarsWithInheritedDSAType =
llvm::SmallDenseMap<const ValueDecl *, const Expr *, 4>;
/// Called on well-formed '\#pragma omp simd' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '#pragma omp tile' after parsing of its clauses and
/// the associated statement.
StmtResult ActOnOpenMPTileDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '#pragma omp unroll' after parsing of its clauses
/// and the associated statement.
StmtResult ActOnOpenMPUnrollDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp for' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPForDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp for simd' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPForSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp sections' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPSectionsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp section' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPSectionDirective(Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp single' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPSingleDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp master' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPMasterDirective(Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp critical' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPCriticalDirective(const DeclarationNameInfo &DirName,
ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp parallel for' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp parallel for simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp parallel master' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelMasterDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp parallel sections' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelSectionsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp task' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTaskDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp taskyield'.
StmtResult ActOnOpenMPTaskyieldDirective(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp barrier'.
StmtResult ActOnOpenMPBarrierDirective(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp taskwait'.
StmtResult ActOnOpenMPTaskwaitDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp taskgroup'.
StmtResult ActOnOpenMPTaskgroupDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp flush'.
StmtResult ActOnOpenMPFlushDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp depobj'.
StmtResult ActOnOpenMPDepobjDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp scan'.
StmtResult ActOnOpenMPScanDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp ordered' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPOrderedDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp atomic' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTargetDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target data' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPTargetDataDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target enter data' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetEnterDataDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
Stmt *AStmt);
/// Called on well-formed '\#pragma omp target exit data' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetExitDataDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
Stmt *AStmt);
/// Called on well-formed '\#pragma omp target parallel' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetParallelDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target parallel for' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTeamsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp cancellation point'.
StmtResult
ActOnOpenMPCancellationPointDirective(SourceLocation StartLoc,
SourceLocation EndLoc,
OpenMPDirectiveKind CancelRegion);
/// Called on well-formed '\#pragma omp cancel'.
StmtResult ActOnOpenMPCancelDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
OpenMPDirectiveKind CancelRegion);
/// Called on well-formed '\#pragma omp taskloop' after parsing of the
/// associated statement.
StmtResult
ActOnOpenMPTaskLoopDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp taskloop simd' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPTaskLoopSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp master taskloop' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPMasterTaskLoopDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp master taskloop simd' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPMasterTaskLoopSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp parallel master taskloop' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelMasterTaskLoopDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp parallel master taskloop simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelMasterTaskLoopSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp distribute' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPDistributeDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target update'.
StmtResult ActOnOpenMPTargetUpdateDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
Stmt *AStmt);
/// Called on well-formed '\#pragma omp distribute parallel for' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPDistributeParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp distribute parallel for simd'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPDistributeParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp distribute simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPDistributeSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target parallel for simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target simd' after parsing of
/// the associated statement.
StmtResult
ActOnOpenMPTargetSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPTeamsDistributeDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute simd' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPTeamsDistributeSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute parallel for simd'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPTeamsDistributeParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute parallel for'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPTeamsDistributeParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTargetTeamsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target teams distribute' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams distribute parallel for'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams distribute parallel for
/// simd' after parsing of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams distribute simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp interop'.
StmtResult ActOnOpenMPInteropDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp dispatch' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPDispatchDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp masked' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPMaskedDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp loop' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPGenericLoopDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Checks correctness of linear modifiers.
bool CheckOpenMPLinearModifier(OpenMPLinearClauseKind LinKind,
SourceLocation LinLoc);
/// Checks that the specified declaration matches the requirements for
/// linear decls.
bool CheckOpenMPLinearDecl(const ValueDecl *D, SourceLocation ELoc,
OpenMPLinearClauseKind LinKind, QualType Type,
bool IsDeclareSimd = false);
/// Called on well-formed '\#pragma omp declare simd' after parsing of
/// the associated method/function.
DeclGroupPtrTy ActOnOpenMPDeclareSimdDirective(
DeclGroupPtrTy DG, OMPDeclareSimdDeclAttr::BranchStateTy BS,
Expr *Simdlen, ArrayRef<Expr *> Uniforms, ArrayRef<Expr *> Aligneds,
ArrayRef<Expr *> Alignments, ArrayRef<Expr *> Linears,
ArrayRef<unsigned> LinModifiers, ArrayRef<Expr *> Steps, SourceRange SR);
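// Illustrative 'declare simd' form feeding the parameters above
// ('add' is a made-up function):
//
//   #pragma omp declare simd simdlen(8) uniform(a) linear(i : 1)
//   float add(float *a, int i);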
/// Checks '\#pragma omp declare variant' variant function and original
/// functions after parsing of the associated method/function.
/// \param DG Function declaration to which the declare variant directive
/// is applied.
/// \param VariantRef Expression that references the variant function, which
/// must be used instead of the original one, specified in \p DG.
/// \param TI The trait info object representing the match clause.
/// \param NumAppendArgs The number of omp_interop_t arguments to account for
/// in checking.
/// \returns None if the function and the variant function are not compatible
/// with the pragma; otherwise, a pair of the original function and the
/// variant reference expression.
Optional<std::pair<FunctionDecl *, Expr *>>
checkOpenMPDeclareVariantFunction(DeclGroupPtrTy DG, Expr *VariantRef,
OMPTraitInfo &TI, unsigned NumAppendArgs,
SourceRange SR);
/// Called on well-formed '\#pragma omp declare variant' after parsing of
/// the associated method/function.
/// \param FD Function declaration to which the declare variant directive
/// is applied.
/// \param VariantRef Expression that references the variant function, which
/// must be used instead of the original one, specified in \p FD.
/// \param TI The context traits associated with the function variant.
/// \param AdjustArgsNothing The list of 'nothing' arguments.
/// \param AdjustArgsNeedDevicePtr The list of 'need_device_ptr' arguments.
/// \param AppendArgs The list of 'append_args' arguments.
/// \param AdjustArgsLoc The Location of an 'adjust_args' clause.
/// \param AppendArgsLoc The Location of an 'append_args' clause.
/// \param SR The SourceRange of the 'declare variant' directive.
void ActOnOpenMPDeclareVariantDirective(
FunctionDecl *FD, Expr *VariantRef, OMPTraitInfo &TI,
ArrayRef<Expr *> AdjustArgsNothing,
ArrayRef<Expr *> AdjustArgsNeedDevicePtr,
ArrayRef<OMPDeclareVariantAttr::InteropType> AppendArgs,
SourceLocation AdjustArgsLoc, SourceLocation AppendArgsLoc,
SourceRange SR);
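// Illustrative 'declare variant' use; function names are made up. The
// pragma is attached to the base function and names the variant:
//
//   void saxpy_gpu(int n, float a, float *x, float *y);   // variant
//   #pragma omp declare variant(saxpy_gpu) match(device = {kind(gpu)})
//   void saxpy(int n, float a, float *x, float *y);       // base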
OMPClause *ActOnOpenMPSingleExprClause(OpenMPClauseKind Kind,
Expr *Expr,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'allocator' clause.
OMPClause *ActOnOpenMPAllocatorClause(Expr *Allocator,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'if' clause.
OMPClause *ActOnOpenMPIfClause(OpenMPDirectiveKind NameModifier,
Expr *Condition, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation NameModifierLoc,
SourceLocation ColonLoc,
SourceLocation EndLoc);
/// Called on well-formed 'final' clause.
OMPClause *ActOnOpenMPFinalClause(Expr *Condition, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'num_threads' clause.
OMPClause *ActOnOpenMPNumThreadsClause(Expr *NumThreads,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'align' clause.
OMPClause *ActOnOpenMPAlignClause(Expr *Alignment, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'safelen' clause.
OMPClause *ActOnOpenMPSafelenClause(Expr *Length,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'simdlen' clause.
OMPClause *ActOnOpenMPSimdlenClause(Expr *Length, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on a well-formed 'sizes' clause.
OMPClause *ActOnOpenMPSizesClause(ArrayRef<Expr *> SizeExprs,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on a well-formed 'full' clause.
OMPClause *ActOnOpenMPFullClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on a well-formed 'partial' clause.
OMPClause *ActOnOpenMPPartialClause(Expr *FactorExpr, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'collapse' clause.
OMPClause *ActOnOpenMPCollapseClause(Expr *NumForLoops,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'ordered' clause.
OMPClause *
ActOnOpenMPOrderedClause(SourceLocation StartLoc, SourceLocation EndLoc,
SourceLocation LParenLoc = SourceLocation(),
Expr *NumForLoops = nullptr);
/// Called on well-formed 'grainsize' clause.
OMPClause *ActOnOpenMPGrainsizeClause(Expr *Size, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'num_tasks' clause.
OMPClause *ActOnOpenMPNumTasksClause(Expr *NumTasks, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'hint' clause.
OMPClause *ActOnOpenMPHintClause(Expr *Hint, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'detach' clause.
OMPClause *ActOnOpenMPDetachClause(Expr *Evt, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
OMPClause *ActOnOpenMPSimpleClause(OpenMPClauseKind Kind,
unsigned Argument,
SourceLocation ArgumentLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'when' clause.
OMPClause *ActOnOpenMPWhenClause(OMPTraitInfo &TI, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'default' clause.
OMPClause *ActOnOpenMPDefaultClause(llvm::omp::DefaultKind Kind,
SourceLocation KindLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'proc_bind' clause.
OMPClause *ActOnOpenMPProcBindClause(llvm::omp::ProcBindKind Kind,
SourceLocation KindLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'order' clause.
OMPClause *ActOnOpenMPOrderClause(OpenMPOrderClauseKind Kind,
SourceLocation KindLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'update' clause.
OMPClause *ActOnOpenMPUpdateClause(OpenMPDependClauseKind Kind,
SourceLocation KindLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
OMPClause *ActOnOpenMPSingleExprWithArgClause(
OpenMPClauseKind Kind, ArrayRef<unsigned> Arguments, Expr *Expr,
SourceLocation StartLoc, SourceLocation LParenLoc,
ArrayRef<SourceLocation> ArgumentsLoc, SourceLocation DelimLoc,
SourceLocation EndLoc);
/// Called on well-formed 'schedule' clause.
OMPClause *ActOnOpenMPScheduleClause(
OpenMPScheduleClauseModifier M1, OpenMPScheduleClauseModifier M2,
OpenMPScheduleClauseKind Kind, Expr *ChunkSize, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation M1Loc, SourceLocation M2Loc,
SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc);
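// Parameter mapping for an illustrative clause 'schedule(monotonic : dynamic, 4)':
// M1 is OMPC_SCHEDULE_MODIFIER_monotonic, Kind is OMPC_SCHEDULE_dynamic,
// and ChunkSize is the expression '4'.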
OMPClause *ActOnOpenMPClause(OpenMPClauseKind Kind, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'nowait' clause.
OMPClause *ActOnOpenMPNowaitClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'untied' clause.
OMPClause *ActOnOpenMPUntiedClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'mergeable' clause.
OMPClause *ActOnOpenMPMergeableClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'read' clause.
OMPClause *ActOnOpenMPReadClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'write' clause.
OMPClause *ActOnOpenMPWriteClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'update' clause.
OMPClause *ActOnOpenMPUpdateClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'capture' clause.
OMPClause *ActOnOpenMPCaptureClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'seq_cst' clause.
OMPClause *ActOnOpenMPSeqCstClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'acq_rel' clause.
OMPClause *ActOnOpenMPAcqRelClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'acquire' clause.
OMPClause *ActOnOpenMPAcquireClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'release' clause.
OMPClause *ActOnOpenMPReleaseClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'relaxed' clause.
OMPClause *ActOnOpenMPRelaxedClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'init' clause.
OMPClause *ActOnOpenMPInitClause(Expr *InteropVar, ArrayRef<Expr *> PrefExprs,
bool IsTarget, bool IsTargetSync,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation VarLoc,
SourceLocation EndLoc);
/// Called on well-formed 'use' clause.
OMPClause *ActOnOpenMPUseClause(Expr *InteropVar, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation VarLoc, SourceLocation EndLoc);
/// Called on well-formed 'destroy' clause.
OMPClause *ActOnOpenMPDestroyClause(Expr *InteropVar, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation VarLoc,
SourceLocation EndLoc);
/// Called on well-formed 'novariants' clause.
OMPClause *ActOnOpenMPNovariantsClause(Expr *Condition,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'nocontext' clause.
OMPClause *ActOnOpenMPNocontextClause(Expr *Condition,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'filter' clause.
OMPClause *ActOnOpenMPFilterClause(Expr *ThreadID, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'threads' clause.
OMPClause *ActOnOpenMPThreadsClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'simd' clause.
OMPClause *ActOnOpenMPSIMDClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'nogroup' clause.
OMPClause *ActOnOpenMPNogroupClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'unified_address' clause.
OMPClause *ActOnOpenMPUnifiedAddressClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'unified_shared_memory' clause.
OMPClause *ActOnOpenMPUnifiedSharedMemoryClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'reverse_offload' clause.
OMPClause *ActOnOpenMPReverseOffloadClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'dynamic_allocators' clause.
OMPClause *ActOnOpenMPDynamicAllocatorsClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'atomic_default_mem_order' clause.
OMPClause *ActOnOpenMPAtomicDefaultMemOrderClause(
OpenMPAtomicDefaultMemOrderClauseKind Kind, SourceLocation KindLoc,
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc);
OMPClause *ActOnOpenMPVarListClause(
OpenMPClauseKind Kind, ArrayRef<Expr *> Vars, Expr *DepModOrTailExpr,
const OMPVarListLocTy &Locs, SourceLocation ColonLoc,
CXXScopeSpec &ReductionOrMapperIdScopeSpec,
DeclarationNameInfo &ReductionOrMapperId, int ExtraModifier,
ArrayRef<OpenMPMapModifierKind> MapTypeModifiers,
ArrayRef<SourceLocation> MapTypeModifiersLoc, bool IsMapTypeImplicit,
SourceLocation ExtraModifierLoc,
ArrayRef<OpenMPMotionModifierKind> MotionModifiers,
ArrayRef<SourceLocation> MotionModifiersLoc);
/// Called on well-formed 'inclusive' clause.
OMPClause *ActOnOpenMPInclusiveClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'exclusive' clause.
OMPClause *ActOnOpenMPExclusiveClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'allocate' clause.
OMPClause *
ActOnOpenMPAllocateClause(Expr *Allocator, ArrayRef<Expr *> VarList,
SourceLocation StartLoc, SourceLocation ColonLoc,
SourceLocation LParenLoc, SourceLocation EndLoc);
/// Called on well-formed 'private' clause.
OMPClause *ActOnOpenMPPrivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'firstprivate' clause.
OMPClause *ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'lastprivate' clause.
OMPClause *ActOnOpenMPLastprivateClause(
ArrayRef<Expr *> VarList, OpenMPLastprivateModifier LPKind,
SourceLocation LPKindLoc, SourceLocation ColonLoc,
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc);
/// Called on well-formed 'shared' clause.
OMPClause *ActOnOpenMPSharedClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'reduction' clause.
OMPClause *ActOnOpenMPReductionClause(
ArrayRef<Expr *> VarList, OpenMPReductionClauseModifier Modifier,
SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation ModifierLoc, SourceLocation ColonLoc,
SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec,
const DeclarationNameInfo &ReductionId,
ArrayRef<Expr *> UnresolvedReductions = llvm::None);
/// Called on well-formed 'task_reduction' clause.
OMPClause *ActOnOpenMPTaskReductionClause(
ArrayRef<Expr *> VarList, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc,
CXXScopeSpec &ReductionIdScopeSpec,
const DeclarationNameInfo &ReductionId,
ArrayRef<Expr *> UnresolvedReductions = llvm::None);
/// Called on well-formed 'in_reduction' clause.
OMPClause *ActOnOpenMPInReductionClause(
ArrayRef<Expr *> VarList, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc,
CXXScopeSpec &ReductionIdScopeSpec,
const DeclarationNameInfo &ReductionId,
ArrayRef<Expr *> UnresolvedReductions = llvm::None);
/// Called on well-formed 'linear' clause.
OMPClause *
ActOnOpenMPLinearClause(ArrayRef<Expr *> VarList, Expr *Step,
SourceLocation StartLoc, SourceLocation LParenLoc,
OpenMPLinearClauseKind LinKind, SourceLocation LinLoc,
SourceLocation ColonLoc, SourceLocation EndLoc);
/// Called on well-formed 'aligned' clause.
OMPClause *ActOnOpenMPAlignedClause(ArrayRef<Expr *> VarList,
Expr *Alignment,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation ColonLoc,
SourceLocation EndLoc);
/// Called on well-formed 'copyin' clause.
OMPClause *ActOnOpenMPCopyinClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'copyprivate' clause.
OMPClause *ActOnOpenMPCopyprivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'flush' pseudo clause.
OMPClause *ActOnOpenMPFlushClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'depobj' pseudo clause.
OMPClause *ActOnOpenMPDepobjClause(Expr *Depobj, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'depend' clause.
OMPClause *
ActOnOpenMPDependClause(Expr *DepModifier, OpenMPDependClauseKind DepKind,
SourceLocation DepLoc, SourceLocation ColonLoc,
ArrayRef<Expr *> VarList, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation EndLoc);
/// Called on well-formed 'device' clause.
OMPClause *ActOnOpenMPDeviceClause(OpenMPDeviceClauseModifier Modifier,
Expr *Device, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation ModifierLoc,
SourceLocation EndLoc);
/// Called on well-formed 'map' clause.
OMPClause *ActOnOpenMPMapClause(
ArrayRef<OpenMPMapModifierKind> MapTypeModifiers,
ArrayRef<SourceLocation> MapTypeModifiersLoc,
CXXScopeSpec &MapperIdScopeSpec, DeclarationNameInfo &MapperId,
OpenMPMapClauseKind MapType, bool IsMapTypeImplicit,
SourceLocation MapLoc, SourceLocation ColonLoc, ArrayRef<Expr *> VarList,
const OMPVarListLocTy &Locs, bool NoDiagnose = false,
ArrayRef<Expr *> UnresolvedMappers = llvm::None);
/// Called on well-formed 'num_teams' clause.
OMPClause *ActOnOpenMPNumTeamsClause(Expr *NumTeams, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'thread_limit' clause.
OMPClause *ActOnOpenMPThreadLimitClause(Expr *ThreadLimit,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'priority' clause.
OMPClause *ActOnOpenMPPriorityClause(Expr *Priority, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'dist_schedule' clause.
OMPClause *ActOnOpenMPDistScheduleClause(
OpenMPDistScheduleClauseKind Kind, Expr *ChunkSize,
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation KindLoc,
SourceLocation CommaLoc, SourceLocation EndLoc);
/// Called on well-formed 'defaultmap' clause.
OMPClause *ActOnOpenMPDefaultmapClause(
OpenMPDefaultmapClauseModifier M, OpenMPDefaultmapClauseKind Kind,
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation MLoc,
SourceLocation KindLoc, SourceLocation EndLoc);
/// Called on well-formed 'to' clause.
OMPClause *
ActOnOpenMPToClause(ArrayRef<OpenMPMotionModifierKind> MotionModifiers,
ArrayRef<SourceLocation> MotionModifiersLoc,
CXXScopeSpec &MapperIdScopeSpec,
DeclarationNameInfo &MapperId, SourceLocation ColonLoc,
ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs,
ArrayRef<Expr *> UnresolvedMappers = llvm::None);
/// Called on well-formed 'from' clause.
OMPClause *
ActOnOpenMPFromClause(ArrayRef<OpenMPMotionModifierKind> MotionModifiers,
ArrayRef<SourceLocation> MotionModifiersLoc,
CXXScopeSpec &MapperIdScopeSpec,
DeclarationNameInfo &MapperId, SourceLocation ColonLoc,
ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs,
ArrayRef<Expr *> UnresolvedMappers = llvm::None);
/// Called on well-formed 'use_device_ptr' clause.
OMPClause *ActOnOpenMPUseDevicePtrClause(ArrayRef<Expr *> VarList,
const OMPVarListLocTy &Locs);
/// Called on well-formed 'use_device_addr' clause.
OMPClause *ActOnOpenMPUseDeviceAddrClause(ArrayRef<Expr *> VarList,
const OMPVarListLocTy &Locs);
/// Called on well-formed 'is_device_ptr' clause.
OMPClause *ActOnOpenMPIsDevicePtrClause(ArrayRef<Expr *> VarList,
const OMPVarListLocTy &Locs);
/// Called on well-formed 'nontemporal' clause.
OMPClause *ActOnOpenMPNontemporalClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Data for list of allocators.
struct UsesAllocatorsData {
/// Allocator.
Expr *Allocator = nullptr;
/// Allocator traits.
Expr *AllocatorTraits = nullptr;
/// Locations of '(' and ')' symbols.
SourceLocation LParenLoc, RParenLoc;
};
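// Illustrative clause populating UsesAllocatorsData entries ('my_alloc'
// and 'my_traits' are made-up names):
//   uses_allocators(omp_default_mem_alloc, my_alloc(my_traits))
// The first entry sets only Allocator; the second also carries
// AllocatorTraits and the parenthesis locations.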
/// Called on well-formed 'uses_allocators' clause.
OMPClause *ActOnOpenMPUsesAllocatorClause(SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc,
ArrayRef<UsesAllocatorsData> Data);
/// Called on well-formed 'affinity' clause.
OMPClause *ActOnOpenMPAffinityClause(SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation ColonLoc,
SourceLocation EndLoc, Expr *Modifier,
ArrayRef<Expr *> Locators);
/// Called on a well-formed 'bind' clause.
OMPClause *ActOnOpenMPBindClause(OpenMPBindClauseKind Kind,
SourceLocation KindLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// The kind of conversion being performed.
enum CheckedConversionKind {
/// An implicit conversion.
CCK_ImplicitConversion,
/// A C-style cast.
CCK_CStyleCast,
/// A functional-style cast.
CCK_FunctionalCast,
/// A cast other than a C-style cast.
CCK_OtherCast,
/// A conversion for an operand of a builtin overloaded operator.
CCK_ForBuiltinOverloadedOp
};
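// Illustrative mapping of cast syntax to CheckedConversionKind:
//
//   double d = 1.0;
//   int a = d;                    // CCK_ImplicitConversion
//   int b = (int)d;               // CCK_CStyleCast
//   int c = int(d);               // CCK_FunctionalCast
//   int e = static_cast<int>(d);  // CCK_OtherCast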
static bool isCast(CheckedConversionKind CCK) {
return CCK == CCK_CStyleCast || CCK == CCK_FunctionalCast ||
CCK == CCK_OtherCast;
}
/// ImpCastExprToType - If Expr is not of type 'Type', insert an implicit
/// cast. If there is already an implicit cast, merge into the existing one.
/// The value kind of the result is given by \p VK (e.g. VK_LValue).
ExprResult
ImpCastExprToType(Expr *E, QualType Type, CastKind CK,
ExprValueKind VK = VK_PRValue,
const CXXCastPath *BasePath = nullptr,
CheckedConversionKind CCK = CCK_ImplicitConversion);
/// ScalarTypeToBooleanCastKind - Returns the cast kind corresponding
/// to the conversion from scalar type ScalarTy to the Boolean type.
static CastKind ScalarTypeToBooleanCastKind(QualType ScalarTy);
/// IgnoredValueConversions - Given that an expression's result is
/// syntactically ignored, perform any conversions that are
/// required.
ExprResult IgnoredValueConversions(Expr *E);
// UsualUnaryConversions - promotes integers (C99 6.3.1.1p2) and converts
// functions and arrays to their respective pointers (C99 6.3.2.1).
ExprResult UsualUnaryConversions(Expr *E);
/// CallExprUnaryConversions - a special case of a unary conversion
/// performed on a function designator of a call expression.
ExprResult CallExprUnaryConversions(Expr *E);
// DefaultFunctionArrayConversion - converts functions and arrays
// to their respective pointers (C99 6.3.2.1).
ExprResult DefaultFunctionArrayConversion(Expr *E, bool Diagnose = true);
// DefaultFunctionArrayLvalueConversion - converts functions and
// arrays to their respective pointers and performs the
// lvalue-to-rvalue conversion.
ExprResult DefaultFunctionArrayLvalueConversion(Expr *E,
bool Diagnose = true);
// DefaultLvalueConversion - performs lvalue-to-rvalue conversion on
// the operand. This function is a no-op if the operand has a function type
// or an array type.
ExprResult DefaultLvalueConversion(Expr *E);
// DefaultArgumentPromotion (C99 6.5.2.2p6). Used for function calls that
// do not have a prototype. Integer promotions are performed on each
// argument, and arguments that have type float are promoted to double.
ExprResult DefaultArgumentPromotion(Expr *E);
/// If \p E is a prvalue denoting an unmaterialized temporary, materialize
/// it as an xvalue. In C++98, the result will still be a prvalue, because
/// we don't have xvalues there.
ExprResult TemporaryMaterializationConversion(Expr *E);
// Used for emitting the right warning by DefaultVariadicArgumentPromotion
enum VariadicCallType {
VariadicFunction,
VariadicBlock,
VariadicMethod,
VariadicConstructor,
VariadicDoesNotApply
};
VariadicCallType getVariadicCallType(FunctionDecl *FDecl,
const FunctionProtoType *Proto,
Expr *Fn);
// Used for determining in which context a type is allowed to be passed to a
// vararg function.
enum VarArgKind {
VAK_Valid,
VAK_ValidInCXX11,
VAK_Undefined,
VAK_MSVCUndefined,
VAK_Invalid
};
// Determines which VarArgKind fits an expression.
VarArgKind isValidVarArgType(const QualType &Ty);
/// Check to see if the given expression is a valid argument to a variadic
/// function, issuing a diagnostic if not.
void checkVariadicArgument(const Expr *E, VariadicCallType CT);
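// Example usage of the variadic-argument helpers (a sketch; 'TrailingArgs'
// is an assumed range holding the arguments past the last declared
// parameter):
//   VariadicCallType CT = getVariadicCallType(FDecl, Proto, Fn);
//   if (CT != VariadicDoesNotApply)
//     for (const Expr *Arg : TrailingArgs)
//       checkVariadicArgument(Arg, CT);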
/// Check whether the given statement can have musttail applied to it,
/// issuing a diagnostic and returning false if not. In the success case,
/// the statement is rewritten to remove implicit nodes from the return
/// value.
bool checkAndRewriteMustTailAttr(Stmt *St, const Attr &MTA);
private:
/// Check whether the given statement can have musttail applied to it,
/// issuing a diagnostic and returning false if not.
bool checkMustTailAttr(const Stmt *St, const Attr &MTA);
public:
/// Check to see if a given expression could have '.c_str()' called on it.
bool hasCStrMethod(const Expr *E);
/// GatherArgumentsForCall - Collects argument expressions for various
/// forms of call prototypes.
bool GatherArgumentsForCall(SourceLocation CallLoc, FunctionDecl *FDecl,
const FunctionProtoType *Proto,
unsigned FirstParam, ArrayRef<Expr *> Args,
SmallVectorImpl<Expr *> &AllArgs,
VariadicCallType CallType = VariadicDoesNotApply,
bool AllowExplicit = false,
bool IsListInitialization = false);
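// Example usage of GatherArgumentsForCall (a sketch; assumes the function
// reports failure by returning true, as its callers treat it):
//   SmallVector<Expr *, 8> AllArgs;
//   if (GatherArgumentsForCall(CallLoc, FDecl, Proto, /*FirstParam=*/0,
//                              Args, AllArgs, CallType))
//     return ExprError();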
// DefaultVariadicArgumentPromotion - Like DefaultArgumentPromotion, but
// will create a runtime trap if the resulting type is not a POD type.
ExprResult DefaultVariadicArgumentPromotion(Expr *E, VariadicCallType CT,
FunctionDecl *FDecl);
/// Context in which we're performing a usual arithmetic conversion.
enum ArithConvKind {
/// An arithmetic operation.
ACK_Arithmetic,
/// A bitwise operation.
ACK_BitwiseOp,
/// A comparison.
ACK_Comparison,
/// A conditional (?:) operator.
ACK_Conditional,
/// A compound assignment expression.
ACK_CompAssign,
};
// UsualArithmeticConversions - performs the UsualUnaryConversions on its
// operands and then handles various conversions that are common to binary
// operators (C99 6.3.1.8). If the operands aren't both arithmetic, this
// routine returns the first non-arithmetic type found. The client is
// responsible for emitting appropriate error diagnostics.
QualType UsualArithmeticConversions(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc, ArithConvKind ACK);
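// Example usage of UsualArithmeticConversions (a sketch; LHS and RHS hold
// the two operands of a binary operator being type-checked):
//   QualType ResultTy =
//       UsualArithmeticConversions(LHS, RHS, OpLoc, ACK_Arithmetic);
//   if (ResultTy.isNull() || LHS.isInvalid() || RHS.isInvalid())
//     return QualType();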
/// AssignConvertType - All of the 'assignment' semantic checks return this
/// enum to indicate whether the assignment was allowed. These checks are
/// done for simple assignments, as well as initialization, return from
/// function, argument passing, etc. The query is phrased in terms of a
/// source and destination type.
enum AssignConvertType {
/// Compatible - the types are compatible according to the standard.
Compatible,
/// PointerToInt - The assignment converts a pointer to an int, which we
/// accept as an extension.
PointerToInt,
/// IntToPointer - The assignment converts an int to a pointer, which we
/// accept as an extension.
IntToPointer,
/// FunctionVoidPointer - The assignment is between a function pointer and
/// void*, which the standard doesn't allow, but we accept as an extension.
FunctionVoidPointer,
/// IncompatiblePointer - The assignment is between two pointer types that
/// are not compatible, but we accept them as an extension.
IncompatiblePointer,
/// IncompatibleFunctionPointer - The assignment is between two function
/// pointer types that are not compatible, but we accept them as an
/// extension.
IncompatibleFunctionPointer,
/// IncompatiblePointerSign - The assignment is between two pointer types
/// which point to integers of differing sign but are otherwise identical.
/// This is a subset of the above, but broken out because it's by far the
/// most common case of incompatible pointers.
IncompatiblePointerSign,
/// CompatiblePointerDiscardsQualifiers - The assignment discards
/// c/v/r qualifiers, which we accept as an extension.
CompatiblePointerDiscardsQualifiers,
/// IncompatiblePointerDiscardsQualifiers - The assignment
/// discards qualifiers that we don't permit to be discarded,
/// like address spaces.
IncompatiblePointerDiscardsQualifiers,
/// IncompatibleNestedPointerAddressSpaceMismatch - The assignment
/// changes address spaces in nested pointer types which is not allowed.
/// For instance, converting __private int ** to __generic int ** is
/// illegal even though __private could be converted to __generic.
IncompatibleNestedPointerAddressSpaceMismatch,
/// IncompatibleNestedPointerQualifiers - The assignment is between two
/// nested pointer types, and the qualifiers other than the first two
/// levels differ e.g. char ** -> const char **, but we accept them as an
/// extension.
IncompatibleNestedPointerQualifiers,
/// IncompatibleVectors - The assignment is between two vector types that
/// have the same size, which we accept as an extension.
IncompatibleVectors,
/// IntToBlockPointer - The assignment converts an int to a block
/// pointer. We disallow this.
IntToBlockPointer,
/// IncompatibleBlockPointer - The assignment is between two block
/// pointer types that are not compatible.
IncompatibleBlockPointer,
/// IncompatibleObjCQualifiedId - The assignment is between a qualified
/// id type and something else (that is incompatible with it). For example,
/// "id <XXX>" = "Foo *", where "Foo *" doesn't implement the XXX protocol.
IncompatibleObjCQualifiedId,
/// IncompatibleObjCWeakRef - Assigning a weak-unavailable object to an
/// object with __weak qualifier.
IncompatibleObjCWeakRef,
/// Incompatible - We reject this conversion outright, it is invalid to
/// represent it in the AST.
Incompatible
};
/// DiagnoseAssignmentResult - Emit a diagnostic, if required, for the
/// assignment conversion type specified by ConvTy. This returns true if the
/// conversion was invalid or false if the conversion was accepted.
bool DiagnoseAssignmentResult(AssignConvertType ConvTy,
SourceLocation Loc,
QualType DstType, QualType SrcType,
Expr *SrcExpr, AssignmentAction Action,
bool *Complained = nullptr);
/// IsValueInFlagEnum - Determine if a value is allowed as part of a flag
/// enum. If AllowMask is true, then we also allow the complement of a valid
/// value, to be used as a mask.
bool IsValueInFlagEnum(const EnumDecl *ED, const llvm::APInt &Val,
bool AllowMask) const;
/// DiagnoseAssignmentEnum - Warn if assignment to enum is a constant
/// integer not in the range of enum values.
void DiagnoseAssignmentEnum(QualType DstType, QualType SrcType,
Expr *SrcExpr);
/// CheckAssignmentConstraints - Perform type checking for assignment,
/// argument passing, variable initialization, and function return values.
/// C99 6.5.16.
AssignConvertType CheckAssignmentConstraints(SourceLocation Loc,
QualType LHSType,
QualType RHSType);
/// Check assignment constraints and optionally prepare for a conversion of
/// the RHS to the LHS type. The conversion is prepared for if ConvertRHS
/// is true.
AssignConvertType CheckAssignmentConstraints(QualType LHSType,
ExprResult &RHS,
CastKind &Kind,
bool ConvertRHS = true);
/// Check assignment constraints for an assignment of RHS to LHSType.
///
/// \param LHSType The destination type for the assignment.
/// \param RHS The source expression for the assignment.
/// \param Diagnose If \c true, diagnostics may be produced when checking
/// for assignability. If a diagnostic is produced, \p RHS will be
/// set to ExprError(). Note that this function may still return
/// without producing a diagnostic, even for an invalid assignment.
/// \param DiagnoseCFAudited If \c true, the target is a function parameter
/// in an audited Core Foundation API and does not need to be checked
/// for ARC retain issues.
/// \param ConvertRHS If \c true, \p RHS will be updated to model the
/// conversions necessary to perform the assignment. If \c false,
/// \p Diagnose must also be \c false.
AssignConvertType CheckSingleAssignmentConstraints(
QualType LHSType, ExprResult &RHS, bool Diagnose = true,
bool DiagnoseCFAudited = false, bool ConvertRHS = true);
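// Example usage of CheckSingleAssignmentConstraints paired with
// DiagnoseAssignmentResult (a sketch; AA_Assigning is one of the
// AssignmentAction values used by the assignment diagnostics):
//   AssignConvertType ConvTy =
//       CheckSingleAssignmentConstraints(LHSType, RHS);
//   if (RHS.isInvalid())
//     return ExprError();
//   if (DiagnoseAssignmentResult(ConvTy, Loc, LHSType,
//                                RHS.get()->getType(), RHS.get(),
//                                AA_Assigning))
//     return ExprError();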
// If the lhs type is a transparent union, check whether we
// can initialize the transparent union with the given expression.
AssignConvertType CheckTransparentUnionArgumentConstraints(QualType ArgType,
ExprResult &RHS);
bool IsStringLiteralToNonConstPointerConversion(Expr *From, QualType ToType);
bool CheckExceptionSpecCompatibility(Expr *From, QualType ToType);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
AssignmentAction Action,
bool AllowExplicit = false);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
const ImplicitConversionSequence& ICS,
AssignmentAction Action,
CheckedConversionKind CCK
= CCK_ImplicitConversion);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
const StandardConversionSequence& SCS,
AssignmentAction Action,
CheckedConversionKind CCK);
ExprResult PerformQualificationConversion(
Expr *E, QualType Ty, ExprValueKind VK = VK_PRValue,
CheckedConversionKind CCK = CCK_ImplicitConversion);
/// the following "Check" methods will return a valid/converted QualType
/// or a null QualType (indicating an error diagnostic was issued).
/// type checking binary operators (subroutines of CreateBuiltinBinOp).
QualType InvalidOperands(SourceLocation Loc, ExprResult &LHS,
ExprResult &RHS);
QualType InvalidLogicalVectorOperands(SourceLocation Loc, ExprResult &LHS,
ExprResult &RHS);
QualType CheckPointerToMemberOperands( // C++ 5.5
ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK,
SourceLocation OpLoc, bool isIndirect);
QualType CheckMultiplyDivideOperands( // C99 6.5.5
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign,
bool IsDivide);
QualType CheckRemainderOperands( // C99 6.5.5
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
bool IsCompAssign = false);
QualType CheckAdditionOperands( // C99 6.5.6
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc, QualType* CompLHSTy = nullptr);
QualType CheckSubtractionOperands( // C99 6.5.6
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
QualType* CompLHSTy = nullptr);
QualType CheckShiftOperands( // C99 6.5.7
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc, bool IsCompAssign = false);
void CheckPtrComparisonWithNullChar(ExprResult &E, ExprResult &NullE);
QualType CheckCompareOperands( // C99 6.5.8/9
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc);
QualType CheckBitwiseOperands( // C99 6.5.[10...12]
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc);
QualType CheckLogicalOperands( // C99 6.5.[13,14]
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc);
// CheckAssignmentOperands is used for both simple and compound assignment.
// For simple assignment, pass both expressions and a null converted type.
// For compound assignment, pass both expressions and the converted type.
QualType CheckAssignmentOperands( // C99 6.5.16.[1,2]
Expr *LHSExpr, ExprResult &RHS, SourceLocation Loc, QualType CompoundType);
ExprResult checkPseudoObjectIncDec(Scope *S, SourceLocation OpLoc,
UnaryOperatorKind Opcode, Expr *Op);
ExprResult checkPseudoObjectAssignment(Scope *S, SourceLocation OpLoc,
BinaryOperatorKind Opcode,
Expr *LHS, Expr *RHS);
ExprResult checkPseudoObjectRValue(Expr *E);
Expr *recreateSyntacticForm(PseudoObjectExpr *E);
QualType CheckConditionalOperands( // C99 6.5.15
ExprResult &Cond, ExprResult &LHS, ExprResult &RHS,
ExprValueKind &VK, ExprObjectKind &OK, SourceLocation QuestionLoc);
QualType CXXCheckConditionalOperands( // C++ 5.16
ExprResult &cond, ExprResult &lhs, ExprResult &rhs,
ExprValueKind &VK, ExprObjectKind &OK, SourceLocation questionLoc);
QualType CheckVectorConditionalTypes(ExprResult &Cond, ExprResult &LHS,
ExprResult &RHS,
SourceLocation QuestionLoc);
QualType FindCompositePointerType(SourceLocation Loc, Expr *&E1, Expr *&E2,
bool ConvertArgs = true);
QualType FindCompositePointerType(SourceLocation Loc,
ExprResult &E1, ExprResult &E2,
bool ConvertArgs = true) {
Expr *E1Tmp = E1.get(), *E2Tmp = E2.get();
QualType Composite =
FindCompositePointerType(Loc, E1Tmp, E2Tmp, ConvertArgs);
E1 = E1Tmp;
E2 = E2Tmp;
return Composite;
}
QualType FindCompositeObjCPointerType(ExprResult &LHS, ExprResult &RHS,
SourceLocation QuestionLoc);
bool DiagnoseConditionalForNull(Expr *LHSExpr, Expr *RHSExpr,
SourceLocation QuestionLoc);
void DiagnoseAlwaysNonNullPointer(Expr *E,
Expr::NullPointerConstantKind NullType,
bool IsEqual, SourceRange Range);
/// Type checking for vector binary operators.
QualType CheckVectorOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc, bool IsCompAssign,
bool AllowBothBool, bool AllowBoolConversion);
QualType GetSignedVectorType(QualType V);
QualType CheckVectorCompareOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc,
BinaryOperatorKind Opc);
QualType CheckVectorLogicalOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc);
/// Type checking for matrix binary operators.
QualType CheckMatrixElementwiseOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc,
bool IsCompAssign);
QualType CheckMatrixMultiplyOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc, bool IsCompAssign);
bool isValidSveBitcast(QualType srcType, QualType destType);
bool areMatrixTypesOfTheSameDimension(QualType srcTy, QualType destTy);
bool areVectorTypesSameSize(QualType srcType, QualType destType);
bool areLaxCompatibleVectorTypes(QualType srcType, QualType destType);
bool isLaxVectorConversion(QualType srcType, QualType destType);
/// Type checking declaration initializers (C99 6.7.8).
bool CheckForConstantInitializer(Expr *e, QualType t);
// Type checking C++ declaration initializers (C++ [dcl.init]).
/// ReferenceCompareResult - Expresses the result of comparing two
/// types (cv1 T1 and cv2 T2) to determine their compatibility for the
/// purposes of initialization by reference (C++ [dcl.init.ref]p4).
enum ReferenceCompareResult {
/// Ref_Incompatible - The two types are incompatible, so direct
/// reference binding is not possible.
Ref_Incompatible = 0,
/// Ref_Related - The two types are reference-related, which means
/// that their unqualified forms (T1 and T2) are either the same
/// or T1 is a base class of T2.
Ref_Related,
/// Ref_Compatible - The two types are reference-compatible.
Ref_Compatible
};
// Fake up a scoped enumeration that still contextually converts to bool.
struct ReferenceConversionsScope {
/// The conversions that would be performed on an lvalue of type T2 when
/// binding a reference of type T1 to it, as determined when evaluating
/// whether T1 is reference-compatible with T2.
enum ReferenceConversions {
Qualification = 0x1,
NestedQualification = 0x2,
Function = 0x4,
DerivedToBase = 0x8,
ObjC = 0x10,
ObjCLifetime = 0x20,
LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue=*/ObjCLifetime)
};
};
using ReferenceConversions = ReferenceConversionsScope::ReferenceConversions;
ReferenceCompareResult
CompareReferenceRelationship(SourceLocation Loc, QualType T1, QualType T2,
ReferenceConversions *Conv = nullptr);
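// Example usage of CompareReferenceRelationship (a sketch; T1 is the
// referenced type, T2 the initializer's type):
//   ReferenceConversions Conv;
//   ReferenceCompareResult RefRelation =
//       CompareReferenceRelationship(Loc, T1, T2, &Conv);
//   bool CanBindDirectly = RefRelation == Ref_Compatible;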
ExprResult checkUnknownAnyCast(SourceRange TypeRange, QualType CastType,
Expr *CastExpr, CastKind &CastKind,
ExprValueKind &VK, CXXCastPath &Path);
/// Force an expression with unknown-type to an expression of the
/// given type.
ExprResult forceUnknownAnyToType(Expr *E, QualType ToType);
/// Type-check an expression that's being passed to an
/// __unknown_anytype parameter.
ExprResult checkUnknownAnyArg(SourceLocation callLoc,
Expr *result, QualType &paramType);
// CheckMatrixCast - Check type constraints for matrix casts.
// We allow casting between matrices of the same dimensions, i.e. when they
// have the same number of rows and columns. Returns true if the cast is
// invalid.
bool CheckMatrixCast(SourceRange R, QualType DestTy, QualType SrcTy,
CastKind &Kind);
// CheckVectorCast - check type constraints for vectors.
// Since vectors are an extension, there is no C standard reference for this.
// We allow casting between vectors and integer datatypes of the same size.
// Returns true if the cast is invalid.
bool CheckVectorCast(SourceRange R, QualType VectorTy, QualType Ty,
CastKind &Kind);
/// Prepare `SplattedExpr` for a vector splat operation, adding
/// implicit casts if necessary.
ExprResult prepareVectorSplat(QualType VectorTy, Expr *SplattedExpr);
// CheckExtVectorCast - check type constraints for extended vectors.
// Since vectors are an extension, there is no C standard reference for this.
// We allow casting between vectors and integer datatypes of the same size,
// or vectors and the element type of that vector.
// Returns the cast expr.
ExprResult CheckExtVectorCast(SourceRange R, QualType DestTy, Expr *CastExpr,
CastKind &Kind);
ExprResult BuildCXXFunctionalCastExpr(TypeSourceInfo *TInfo, QualType Type,
SourceLocation LParenLoc,
Expr *CastExpr,
SourceLocation RParenLoc);
enum ARCConversionResult { ACR_okay, ACR_unbridged, ACR_error };
/// Checks for invalid conversions and casts between
/// retainable pointers and other pointer kinds for ARC and Weak.
ARCConversionResult CheckObjCConversion(SourceRange castRange,
QualType castType, Expr *&op,
CheckedConversionKind CCK,
bool Diagnose = true,
bool DiagnoseCFAudited = false,
BinaryOperatorKind Opc = BO_PtrMemD
);
Expr *stripARCUnbridgedCast(Expr *e);
void diagnoseARCUnbridgedCast(Expr *e);
bool CheckObjCARCUnavailableWeakConversion(QualType castType,
QualType ExprType);
/// checkRetainCycles - Check whether an Objective-C message send
/// might create an obvious retain cycle.
void checkRetainCycles(ObjCMessageExpr *msg);
void checkRetainCycles(Expr *receiver, Expr *argument);
void checkRetainCycles(VarDecl *Var, Expr *Init);
/// checkUnsafeAssigns - Check whether +1 expr is being assigned
/// to weak/__unsafe_unretained type.
bool checkUnsafeAssigns(SourceLocation Loc, QualType LHS, Expr *RHS);
/// checkUnsafeExprAssigns - Check whether +1 expr is being assigned
/// to weak/__unsafe_unretained expression.
void checkUnsafeExprAssigns(SourceLocation Loc, Expr *LHS, Expr *RHS);
/// CheckMessageArgumentTypes - Check types in an Obj-C message send.
/// \param Method - May be null.
/// \param [out] ReturnType - The return type of the send.
/// \return true iff there were any incompatible types.
bool CheckMessageArgumentTypes(const Expr *Receiver, QualType ReceiverType,
MultiExprArg Args, Selector Sel,
ArrayRef<SourceLocation> SelectorLocs,
ObjCMethodDecl *Method, bool isClassMessage,
bool isSuperMessage, SourceLocation lbrac,
SourceLocation rbrac, SourceRange RecRange,
QualType &ReturnType, ExprValueKind &VK);
/// Determine the result of a message send expression based on
/// the type of the receiver, the method expected to receive the message,
/// and the form of the message send.
QualType getMessageSendResultType(const Expr *Receiver, QualType ReceiverType,
ObjCMethodDecl *Method, bool isClassMessage,
bool isSuperMessage);
/// If the given expression involves a message send to a method
/// with a related result type, emit a note describing what happened.
void EmitRelatedResultTypeNote(const Expr *E);
/// Given that we had incompatible pointer types in a return
/// statement, check whether we're in a method with a related result
/// type, and if so, emit a note describing what happened.
void EmitRelatedResultTypeNoteForReturn(QualType destType);
class ConditionResult {
Decl *ConditionVar;
FullExprArg Condition;
bool Invalid;
bool HasKnownValue;
bool KnownValue;
friend class Sema;
ConditionResult(Sema &S, Decl *ConditionVar, FullExprArg Condition,
bool IsConstexpr)
: ConditionVar(ConditionVar), Condition(Condition), Invalid(false),
HasKnownValue(IsConstexpr && Condition.get() &&
!Condition.get()->isValueDependent()),
KnownValue(HasKnownValue &&
!!Condition.get()->EvaluateKnownConstInt(S.Context)) {}
explicit ConditionResult(bool Invalid)
: ConditionVar(nullptr), Condition(nullptr), Invalid(Invalid),
HasKnownValue(false), KnownValue(false) {}
public:
ConditionResult() : ConditionResult(false) {}
bool isInvalid() const { return Invalid; }
std::pair<VarDecl *, Expr *> get() const {
return std::make_pair(cast_or_null<VarDecl>(ConditionVar),
Condition.get());
}
llvm::Optional<bool> getKnownValue() const {
if (!HasKnownValue)
return None;
return KnownValue;
}
};
static ConditionResult ConditionError() { return ConditionResult(true); }
enum class ConditionKind {
Boolean, ///< A boolean condition, from 'if', 'while', 'for', or 'do'.
ConstexprIf, ///< A constant boolean condition from 'if constexpr'.
Switch ///< An integral condition for a 'switch' statement.
};
ConditionResult ActOnCondition(Scope *S, SourceLocation Loc,
Expr *SubExpr, ConditionKind CK);
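// Example usage of ActOnCondition (a sketch; building the condition of an
// 'if' statement from a parsed expression):
//   ConditionResult Cond =
//       ActOnCondition(S, IfLoc, CondExpr, ConditionKind::Boolean);
//   if (Cond.isInvalid())
//     return StmtError();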
ConditionResult ActOnConditionVariable(Decl *ConditionVar,
SourceLocation StmtLoc,
ConditionKind CK);
DeclResult ActOnCXXConditionDeclaration(Scope *S, Declarator &D);
ExprResult CheckConditionVariable(VarDecl *ConditionVar,
SourceLocation StmtLoc,
ConditionKind CK);
ExprResult CheckSwitchCondition(SourceLocation SwitchLoc, Expr *Cond);
/// CheckBooleanCondition - Diagnose problems involving the use of
/// the given expression as a boolean condition (e.g. in an if
/// statement). Also performs the standard function and array
/// decays, possibly changing the input variable.
///
/// \param Loc - A location associated with the condition, e.g. the
/// 'if' keyword.
/// \return the converted expression; invalid if there were any errors.
ExprResult CheckBooleanCondition(SourceLocation Loc, Expr *E,
bool IsConstexpr = false);
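// Example usage of CheckBooleanCondition (a sketch):
//   ExprResult Converted = CheckBooleanCondition(IfLoc, CondExpr);
//   if (Converted.isInvalid())
//     return StmtError();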
/// ActOnExplicitBoolSpecifier - Build an ExplicitSpecifier from an expression
/// found in an explicit(bool) specifier.
ExplicitSpecifier ActOnExplicitBoolSpecifier(Expr *E);
/// tryResolveExplicitSpecifier - Attempt to resolve the explicit specifier.
/// Returns true if the explicit specifier is now resolved.
bool tryResolveExplicitSpecifier(ExplicitSpecifier &ExplicitSpec);
/// DiagnoseAssignmentAsCondition - Given that an expression is
/// being used as a boolean condition, warn if it's an assignment.
void DiagnoseAssignmentAsCondition(Expr *E);
/// Redundant parentheses over an equality comparison can indicate
/// that the user intended an assignment used as condition.
void DiagnoseEqualityWithExtraParens(ParenExpr *ParenE);
/// CheckCXXBooleanCondition - Performs the C++ conversion of an expression
/// to bool; returns an invalid result if the conversion fails.
ExprResult CheckCXXBooleanCondition(Expr *CondExpr, bool IsConstexpr = false);
/// ConvertIntegerToTypeWarnOnOverflow - Convert the specified APInt to have
/// the specified width and sign. If an overflow occurs, detect it and emit
/// the specified diagnostic.
void ConvertIntegerToTypeWarnOnOverflow(llvm::APSInt &OldVal,
unsigned NewWidth, bool NewSign,
SourceLocation Loc, unsigned DiagID);
/// Checks that the Objective-C declaration is declared in the global scope.
/// Emits an error and marks the declaration as invalid if it's not declared
/// in the global scope.
bool CheckObjCDeclScope(Decl *D);
/// Abstract base class used for diagnosing integer constant
/// expression violations.
class VerifyICEDiagnoser {
public:
bool Suppress;
VerifyICEDiagnoser(bool Suppress = false) : Suppress(Suppress) { }
virtual SemaDiagnosticBuilder
diagnoseNotICEType(Sema &S, SourceLocation Loc, QualType T);
virtual SemaDiagnosticBuilder diagnoseNotICE(Sema &S,
SourceLocation Loc) = 0;
virtual SemaDiagnosticBuilder diagnoseFold(Sema &S, SourceLocation Loc);
virtual ~VerifyICEDiagnoser() {}
};
enum AllowFoldKind {
NoFold,
AllowFold,
};
/// VerifyIntegerConstantExpression - Verifies that an expression is an ICE,
/// and reports the appropriate diagnostics. Returns an invalid result on
/// failure. Can optionally return the value of the expression.
ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
VerifyICEDiagnoser &Diagnoser,
AllowFoldKind CanFold = NoFold);
ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
unsigned DiagID,
AllowFoldKind CanFold = NoFold);
ExprResult VerifyIntegerConstantExpression(Expr *E,
llvm::APSInt *Result = nullptr,
AllowFoldKind CanFold = NoFold);
ExprResult VerifyIntegerConstantExpression(Expr *E,
AllowFoldKind CanFold = NoFold) {
return VerifyIntegerConstantExpression(E, nullptr, CanFold);
}
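// Example usage of VerifyIntegerConstantExpression (a sketch; checks a
// case-label expression and reads back its value):
//   llvm::APSInt Value;
//   ExprResult ICE =
//       VerifyIntegerConstantExpression(CaseExpr, &Value, AllowFold);
//   if (ICE.isInvalid())
//     return StmtError();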
/// VerifyBitField - verifies that a bit-field expression is an ICE and has
/// the correct width, and that the field type is valid.
/// Returns an invalid result on failure.
/// Can optionally return whether the bit-field is of width 0.
ExprResult VerifyBitField(SourceLocation FieldLoc, IdentifierInfo *FieldName,
QualType FieldTy, bool IsMsStruct,
Expr *BitWidth, bool *ZeroWidth = nullptr);
private:
unsigned ForceCUDAHostDeviceDepth = 0;
public:
/// Increments our count of the number of times we've seen a pragma forcing
/// functions to be __host__ __device__. So long as this count is greater
/// than zero, all functions encountered will be __host__ __device__.
void PushForceCUDAHostDevice();
/// Decrements our count of the number of times we've seen a pragma forcing
/// functions to be __host__ __device__. Returns false if the count is 0
/// before decrementing, so you can emit an error.
bool PopForceCUDAHostDevice();
class DeviceDeferredDiagnostic {
public:
DeviceDeferredDiagnostic(SourceLocation SL, const PartialDiagnostic &PD,
DeviceDiagnosticReason R)
: Diagnostic(SL, PD), Reason(R) {}
PartialDiagnosticAt &getDiag() { return Diagnostic; }
DeviceDiagnosticReason getReason() const { return Reason; }
private:
PartialDiagnosticAt Diagnostic;
DeviceDiagnosticReason Reason;
};
/// Diagnostics that are emitted only if we discover that the given function
/// must be codegen'ed. Because handling these correctly adds overhead to
/// compilation, this is currently only enabled for CUDA compilations.
llvm::DenseMap<CanonicalDeclPtr<FunctionDecl>,
std::vector<DeviceDeferredDiagnostic>>
DeviceDeferredDiags;
/// A pair of a canonical FunctionDecl and a SourceLocation. When used as the
/// key in a hashtable, both the FD and location are hashed.
struct FunctionDeclAndLoc {
CanonicalDeclPtr<FunctionDecl> FD;
SourceLocation Loc;
};
/// FunctionDecls and SourceLocations for which CheckCUDACall has emitted a
/// (maybe deferred) "bad call" diagnostic. We use this to avoid emitting the
/// same deferred diag twice.
llvm::DenseSet<FunctionDeclAndLoc> LocsWithCUDACallDiags;
/// An inverse call graph, mapping known-emitted functions to one of their
/// known-emitted callers (plus the location of the call).
///
/// Functions that we can tell a priori must be emitted aren't added to this
/// map.
llvm::DenseMap</* Callee = */ CanonicalDeclPtr<FunctionDecl>,
/* Caller = */ FunctionDeclAndLoc>
DeviceKnownEmittedFns;
/// Creates a SemaDiagnosticBuilder that emits the diagnostic if the current
/// context is "used as device code".
///
/// - If CurContext is a __host__ function, does not emit any diagnostics
/// unless \p EmitOnBothSides is true.
/// - If CurContext is a __device__ or __global__ function, emits the
/// diagnostics immediately.
/// - If CurContext is a __host__ __device__ function and we are compiling for
/// the device, creates a diagnostic which is emitted if and when we realize
/// that the function will be codegen'ed.
///
/// Example usage:
///
/// // Variable-length arrays are not allowed in CUDA device code.
/// if (CUDADiagIfDeviceCode(Loc, diag::err_cuda_vla) << CurrentCUDATarget())
/// return ExprError();
/// // Otherwise, continue parsing as normal.
SemaDiagnosticBuilder CUDADiagIfDeviceCode(SourceLocation Loc,
unsigned DiagID);
/// Creates a SemaDiagnosticBuilder that emits the diagnostic if the current
/// context is "used as host code".
///
/// Same as CUDADiagIfDeviceCode, with "host" and "device" switched.
SemaDiagnosticBuilder CUDADiagIfHostCode(SourceLocation Loc, unsigned DiagID);
/// Creates a SemaDiagnosticBuilder that emits the diagnostic if the current
/// context is "used as device code".
///
/// - If CurContext is a `declare target` function or it is known that the
/// function is emitted for the device, emits the diagnostics immediately.
/// - If CurContext is a non-`declare target` function and we are compiling
/// for the device, creates a diagnostic which is emitted if and when we
/// realize that the function will be codegen'ed.
///
/// Example usage:
///
/// // Variable-length arrays are not allowed in NVPTX device code.
/// if (diagIfOpenMPDeviceCode(Loc, diag::err_vla_unsupported))
/// return ExprError();
/// // Otherwise, continue parsing as normal.
SemaDiagnosticBuilder
diagIfOpenMPDeviceCode(SourceLocation Loc, unsigned DiagID, FunctionDecl *FD);
/// Creates a SemaDiagnosticBuilder that emits the diagnostic if the current
/// context is "used as host code".
///
/// - If CurContext is a `declare target` function or it is known that the
/// function is emitted for the host, emits the diagnostics immediately.
/// - If CurContext is a non-host function, just ignore it.
///
/// Example usage:
///
/// // Variable-length arrays are not allowed in NVPTX device code.
/// if (diagIfOpenMPHostCode(Loc, diag::err_vla_unsupported))
/// return ExprError();
/// // Otherwise, continue parsing as normal.
SemaDiagnosticBuilder diagIfOpenMPHostCode(SourceLocation Loc,
unsigned DiagID, FunctionDecl *FD);
SemaDiagnosticBuilder targetDiag(SourceLocation Loc, unsigned DiagID,
FunctionDecl *FD = nullptr);
SemaDiagnosticBuilder targetDiag(SourceLocation Loc,
const PartialDiagnostic &PD,
FunctionDecl *FD = nullptr) {
return targetDiag(Loc, PD.getDiagID(), FD) << PD;
}
/// Check if the type is allowed to be used for the current target.
void checkTypeSupport(QualType Ty, SourceLocation Loc,
ValueDecl *D = nullptr);
enum CUDAFunctionTarget {
CFT_Device,
CFT_Global,
CFT_Host,
CFT_HostDevice,
CFT_InvalidTarget
};
/// Determines whether the given function is a CUDA device/host/kernel/etc.
/// function.
///
/// Use this rather than examining the function's attributes yourself -- you
/// will get it wrong. Returns CFT_Host if D is null.
CUDAFunctionTarget IdentifyCUDATarget(const FunctionDecl *D,
bool IgnoreImplicitHDAttr = false);
CUDAFunctionTarget IdentifyCUDATarget(const ParsedAttributesView &Attrs);
enum CUDAVariableTarget {
CVT_Device, ///< Emitted on device side with a shadow variable on host side
CVT_Host, ///< Emitted on host side only
CVT_Both, ///< Emitted on both sides with different addresses
CVT_Unified, ///< Emitted as a unified address, e.g. managed variables
};
/// Determines whether the given variable is emitted on host or device side.
CUDAVariableTarget IdentifyCUDATarget(const VarDecl *D);
/// Gets the CUDA target for the current context.
CUDAFunctionTarget CurrentCUDATarget() {
return IdentifyCUDATarget(dyn_cast<FunctionDecl>(CurContext));
}
static bool isCUDAImplicitHostDeviceFunction(const FunctionDecl *D);
// CUDA function call preference. Must be ordered numerically from
// worst to best.
enum CUDAFunctionPreference {
CFP_Never, // Invalid caller/callee combination.
CFP_WrongSide, // Calls from host-device to host or device
// function that do not match current compilation
// mode.
CFP_HostDevice, // Any calls to host/device functions.
CFP_SameSide, // Calls from host-device to host or device
// function matching current compilation mode.
CFP_Native, // host-to-host or device-to-device calls.
};
/// Identifies relative preference of a given Caller/Callee
/// combination, based on their host/device attributes.
/// \param Caller function which needs address of \p Callee.
/// nullptr in case of global context.
/// \param Callee target function
///
/// \returns preference value for particular Caller/Callee combination.
CUDAFunctionPreference IdentifyCUDAPreference(const FunctionDecl *Caller,
const FunctionDecl *Callee);
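// Example usage of IdentifyCUDAPreference (a sketch; comparing two
// candidates numerically is valid because the enum is ordered from worst
// to best):
//   CUDAFunctionPreference P1 = IdentifyCUDAPreference(Caller, FD1);
//   CUDAFunctionPreference P2 = IdentifyCUDAPreference(Caller, FD2);
//   bool PreferFD1 = P1 > P2;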
/// Determines whether Caller may invoke Callee, based on their CUDA
/// host/device attributes. Returns false if the call is not allowed.
///
/// Note: Will return true for CFP_WrongSide calls. These may appear in
/// semantically correct CUDA programs, but only if they're never codegen'ed.
bool IsAllowedCUDACall(const FunctionDecl *Caller,
const FunctionDecl *Callee) {
return IdentifyCUDAPreference(Caller, Callee) != CFP_Never;
}
/// May add implicit CUDAHostAttr and CUDADeviceAttr attributes to FD,
/// depending on FD and the current compilation settings.
void maybeAddCUDAHostDeviceAttrs(FunctionDecl *FD,
const LookupResult &Previous);
/// May add implicit CUDAConstantAttr attribute to VD, depending on VD
/// and current compilation settings.
void MaybeAddCUDAConstantAttr(VarDecl *VD);
public:
/// Check whether we're allowed to call Callee from the current context.
///
/// - If the call is never allowed in a semantically-correct program
/// (CFP_Never), emits an error and returns false.
///
/// - If the call is allowed in semantically-correct programs, but only if
/// it's never codegen'ed (CFP_WrongSide), creates a deferred diagnostic to
/// be emitted if and when the caller is codegen'ed, and returns true.
///
/// Will only create deferred diagnostics for a given SourceLocation once,
/// so you can safely call this multiple times without generating duplicate
/// deferred errors.
///
/// - Otherwise, returns true without emitting any diagnostics.
bool CheckCUDACall(SourceLocation Loc, FunctionDecl *Callee);
void CUDACheckLambdaCapture(CXXMethodDecl *D, const sema::Capture &Capture);
/// Set __device__ or __host__ __device__ attributes on the given lambda
/// operator() method.
///
/// By default, CUDA lambdas are host+device functions unless they carry an
/// explicit host or device attribute.
void CUDASetLambdaAttrs(CXXMethodDecl *Method);
/// Finds a function in \p Matches with highest calling priority
/// from \p Caller context and erases all functions with lower
/// calling priority.
void EraseUnwantedCUDAMatches(
const FunctionDecl *Caller,
SmallVectorImpl<std::pair<DeclAccessPair, FunctionDecl *>> &Matches);
/// Given an implicit special member, infer its CUDA target from the
/// calls it needs to make to underlying base/field special members.
/// \param ClassDecl the class for which the member is being created.
/// \param CSM the kind of special member.
/// \param MemberDecl the special member itself.
/// \param ConstRHS true if this is a copy operation with a const object on
/// its RHS.
/// \param Diagnose true if this call should emit diagnostics.
/// \return true if there was an error inferring.
/// The result of this call is implicit CUDA target attribute(s) attached to
/// the member declaration.
bool inferCUDATargetForImplicitSpecialMember(CXXRecordDecl *ClassDecl,
CXXSpecialMember CSM,
CXXMethodDecl *MemberDecl,
bool ConstRHS,
bool Diagnose);
/// \return true if \p CD can be considered empty according to CUDA
/// (E.2.3.1 in CUDA 7.5 Programming guide).
bool isEmptyCudaConstructor(SourceLocation Loc, CXXConstructorDecl *CD);
bool isEmptyCudaDestructor(SourceLocation Loc, CXXDestructorDecl *CD);
// \brief Checks that initializers of \p Var satisfy CUDA restrictions. In
// case of error emits appropriate diagnostic and invalidates \p Var.
//
// \details CUDA allows only empty constructors as initializers for global
// variables (see E.2.3.1, CUDA 7.5). The same restriction also applies to all
// __shared__ variables whether they are local or not (they all are implicitly
// static in CUDA). One exception is that CUDA allows constant initializers
// for __constant__ and __device__ variables.
void checkAllowedCUDAInitializer(VarDecl *VD);
/// Check whether NewFD is a valid overload for CUDA. Emits
/// diagnostics and invalidates NewFD if not.
void checkCUDATargetOverload(FunctionDecl *NewFD,
const LookupResult &Previous);
/// Copies target attributes from the template TD to the function FD.
void inheritCUDATargetAttrs(FunctionDecl *FD, const FunctionTemplateDecl &TD);
/// Returns the name of the launch configuration function. This is the name
/// of the function that will be called to configure kernel call, with the
/// parameters specified via <<<>>>.
std::string getCudaConfigureFuncName() const;
/// \name Code completion
//@{
/// Describes the context in which code completion occurs.
enum ParserCompletionContext {
/// Code completion occurs at top-level or namespace context.
PCC_Namespace,
/// Code completion occurs within a class, struct, or union.
PCC_Class,
/// Code completion occurs within an Objective-C interface, protocol,
/// or category.
PCC_ObjCInterface,
/// Code completion occurs within an Objective-C implementation or
/// category implementation.
PCC_ObjCImplementation,
/// Code completion occurs within the list of instance variables
/// in an Objective-C interface, protocol, category, or implementation.
PCC_ObjCInstanceVariableList,
/// Code completion occurs following one or more template
/// headers.
PCC_Template,
/// Code completion occurs following one or more template
/// headers within a class.
PCC_MemberTemplate,
/// Code completion occurs within an expression.
PCC_Expression,
/// Code completion occurs within a statement, which may
/// also be an expression or a declaration.
PCC_Statement,
/// Code completion occurs at the beginning of the
/// initialization statement (or expression) in a for loop.
PCC_ForInit,
/// Code completion occurs within the condition of an if,
/// while, switch, or for statement.
PCC_Condition,
/// Code completion occurs within the body of a function on a
/// recovery path, where we do not have a specific handle on our position
/// in the grammar.
PCC_RecoveryInFunction,
/// Code completion occurs where only a type is permitted.
PCC_Type,
/// Code completion occurs in a parenthesized expression, which
/// might also be a type cast.
PCC_ParenthesizedExpression,
/// Code completion occurs within a sequence of declaration
/// specifiers within a function, method, or block.
PCC_LocalDeclarationSpecifiers
};
void CodeCompleteModuleImport(SourceLocation ImportLoc, ModuleIdPath Path);
void CodeCompleteOrdinaryName(Scope *S,
ParserCompletionContext CompletionContext);
void CodeCompleteDeclSpec(Scope *S, DeclSpec &DS,
bool AllowNonIdentifiers,
bool AllowNestedNameSpecifiers);
struct CodeCompleteExpressionData;
void CodeCompleteExpression(Scope *S,
const CodeCompleteExpressionData &Data);
void CodeCompleteExpression(Scope *S, QualType PreferredType,
bool IsParenthesized = false);
void CodeCompleteMemberReferenceExpr(Scope *S, Expr *Base, Expr *OtherOpBase,
SourceLocation OpLoc, bool IsArrow,
bool IsBaseExprStatement,
QualType PreferredType);
void CodeCompletePostfixExpression(Scope *S, ExprResult LHS,
QualType PreferredType);
void CodeCompleteTag(Scope *S, unsigned TagSpec);
void CodeCompleteTypeQualifiers(DeclSpec &DS);
void CodeCompleteFunctionQualifiers(DeclSpec &DS, Declarator &D,
const VirtSpecifiers *VS = nullptr);
void CodeCompleteBracketDeclarator(Scope *S);
void CodeCompleteCase(Scope *S);
enum class AttributeCompletion {
Attribute,
Scope,
None,
};
void CodeCompleteAttribute(
AttributeCommonInfo::Syntax Syntax,
AttributeCompletion Completion = AttributeCompletion::Attribute,
const IdentifierInfo *Scope = nullptr);
/// Determines the preferred type of the current function argument, by
/// examining the signatures of all possible overloads.
/// Returns null if unknown or ambiguous, or if code completion is off.
///
/// If the code completion point has been reached, also reports the function
/// signatures that were considered.
///
/// FIXME: rename to GuessCallArgumentType to reduce confusion.
QualType ProduceCallSignatureHelp(Scope *S, Expr *Fn, ArrayRef<Expr *> Args,
SourceLocation OpenParLoc);
QualType ProduceConstructorSignatureHelp(Scope *S, QualType Type,
SourceLocation Loc,
ArrayRef<Expr *> Args,
SourceLocation OpenParLoc);
QualType ProduceCtorInitMemberSignatureHelp(Scope *S, Decl *ConstructorDecl,
CXXScopeSpec SS,
ParsedType TemplateTypeTy,
ArrayRef<Expr *> ArgExprs,
IdentifierInfo *II,
SourceLocation OpenParLoc);
void CodeCompleteInitializer(Scope *S, Decl *D);
/// Trigger code completion for a record of \p BaseType. \p InitExprs are
/// expressions in the initializer list seen so far and \p D is the current
/// Designation being parsed.
void CodeCompleteDesignator(const QualType BaseType,
llvm::ArrayRef<Expr *> InitExprs,
const Designation &D);
void CodeCompleteAfterIf(Scope *S, bool IsBracedThen);
void CodeCompleteQualifiedId(Scope *S, CXXScopeSpec &SS, bool EnteringContext,
bool IsUsingDeclaration, QualType BaseType,
QualType PreferredType);
void CodeCompleteUsing(Scope *S);
void CodeCompleteUsingDirective(Scope *S);
void CodeCompleteNamespaceDecl(Scope *S);
void CodeCompleteNamespaceAliasDecl(Scope *S);
void CodeCompleteOperatorName(Scope *S);
void CodeCompleteConstructorInitializer(
Decl *Constructor,
ArrayRef<CXXCtorInitializer *> Initializers);
void CodeCompleteLambdaIntroducer(Scope *S, LambdaIntroducer &Intro,
bool AfterAmpersand);
void CodeCompleteAfterFunctionEquals(Declarator &D);
void CodeCompleteObjCAtDirective(Scope *S);
void CodeCompleteObjCAtVisibility(Scope *S);
void CodeCompleteObjCAtStatement(Scope *S);
void CodeCompleteObjCAtExpression(Scope *S);
void CodeCompleteObjCPropertyFlags(Scope *S, ObjCDeclSpec &ODS);
void CodeCompleteObjCPropertyGetter(Scope *S);
void CodeCompleteObjCPropertySetter(Scope *S);
void CodeCompleteObjCPassingType(Scope *S, ObjCDeclSpec &DS,
bool IsParameter);
void CodeCompleteObjCMessageReceiver(Scope *S);
void CodeCompleteObjCSuperMessage(Scope *S, SourceLocation SuperLoc,
ArrayRef<IdentifierInfo *> SelIdents,
bool AtArgumentExpression);
void CodeCompleteObjCClassMessage(Scope *S, ParsedType Receiver,
ArrayRef<IdentifierInfo *> SelIdents,
bool AtArgumentExpression,
bool IsSuper = false);
void CodeCompleteObjCInstanceMessage(Scope *S, Expr *Receiver,
ArrayRef<IdentifierInfo *> SelIdents,
bool AtArgumentExpression,
ObjCInterfaceDecl *Super = nullptr);
void CodeCompleteObjCForCollection(Scope *S,
DeclGroupPtrTy IterationVar);
void CodeCompleteObjCSelector(Scope *S,
ArrayRef<IdentifierInfo *> SelIdents);
void CodeCompleteObjCProtocolReferences(
ArrayRef<IdentifierLocPair> Protocols);
void CodeCompleteObjCProtocolDecl(Scope *S);
void CodeCompleteObjCInterfaceDecl(Scope *S);
void CodeCompleteObjCSuperclass(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCImplementationDecl(Scope *S);
void CodeCompleteObjCInterfaceCategory(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCImplementationCategory(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCPropertyDefinition(Scope *S);
void CodeCompleteObjCPropertySynthesizeIvar(Scope *S,
IdentifierInfo *PropertyName);
void CodeCompleteObjCMethodDecl(Scope *S, Optional<bool> IsInstanceMethod,
ParsedType ReturnType);
void CodeCompleteObjCMethodDeclSelector(Scope *S,
bool IsInstanceMethod,
bool AtParameterName,
ParsedType ReturnType,
ArrayRef<IdentifierInfo *> SelIdents);
void CodeCompleteObjCClassPropertyRefExpr(Scope *S, IdentifierInfo &ClassName,
SourceLocation ClassNameLoc,
bool IsBaseExprStatement);
void CodeCompletePreprocessorDirective(bool InConditional);
void CodeCompleteInPreprocessorConditionalExclusion(Scope *S);
void CodeCompletePreprocessorMacroName(bool IsDefinition);
void CodeCompletePreprocessorExpression();
void CodeCompletePreprocessorMacroArgument(Scope *S,
IdentifierInfo *Macro,
MacroInfo *MacroInfo,
unsigned Argument);
void CodeCompleteIncludedFile(llvm::StringRef Dir, bool IsAngled);
void CodeCompleteNaturalLanguage();
void CodeCompleteAvailabilityPlatformName();
void GatherGlobalCodeCompletions(CodeCompletionAllocator &Allocator,
CodeCompletionTUInfo &CCTUInfo,
SmallVectorImpl<CodeCompletionResult> &Results);
//@}
//===--------------------------------------------------------------------===//
// Extra semantic analysis beyond the C type system
public:
SourceLocation getLocationOfStringLiteralByte(const StringLiteral *SL,
unsigned ByteNo) const;
private:
void CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr,
const ArraySubscriptExpr *ASE=nullptr,
bool AllowOnePastEnd=true, bool IndexNegated=false);
void CheckArrayAccess(const Expr *E);
// Used to grab the relevant information from a FormatAttr and a
// FunctionDecl.
struct FormatStringInfo {
unsigned FormatIdx;
unsigned FirstDataArg;
bool HasVAListArg;
};
static bool getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember,
FormatStringInfo *FSI);
bool CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall,
const FunctionProtoType *Proto);
bool CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation loc,
ArrayRef<const Expr *> Args);
bool CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall,
const FunctionProtoType *Proto);
bool CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto);
void CheckConstructorCall(FunctionDecl *FDecl, QualType ThisType,
ArrayRef<const Expr *> Args,
const FunctionProtoType *Proto, SourceLocation Loc);
void CheckArgAlignment(SourceLocation Loc, NamedDecl *FDecl,
StringRef ParamName, QualType ArgTy, QualType ParamTy);
void checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto,
const Expr *ThisArg, ArrayRef<const Expr *> Args,
bool IsMemberFunction, SourceLocation Loc, SourceRange Range,
VariadicCallType CallType);
void CheckSYCLKernelCall(FunctionDecl *CallerFunc, SourceRange CallLoc,
ArrayRef<const Expr *> Args);
bool CheckObjCString(Expr *Arg);
ExprResult CheckOSLogFormatStringArg(Expr *Arg);
ExprResult CheckBuiltinFunctionCall(FunctionDecl *FDecl,
unsigned BuiltinID, CallExpr *TheCall);
bool CheckTSBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
void checkFortifiedBuiltinMemoryFunction(FunctionDecl *FD, CallExpr *TheCall);
bool CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall,
unsigned MaxWidth);
bool CheckNeonBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
bool CheckMVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckSVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckCDEBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
bool CheckARMCoprocessorImmediate(const TargetInfo &TI, const Expr *CoprocArg,
bool WantCDE);
bool CheckARMBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
bool CheckAArch64BuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
bool CheckBPFBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckHexagonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckHexagonBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall);
bool CheckMipsBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
bool CheckMipsBuiltinCpu(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
bool CheckMipsBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall);
bool CheckSystemZBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinGatherScatterScale(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinTileArguments(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinTileArgumentsRange(CallExpr *TheCall,
ArrayRef<int> ArgNums);
bool CheckX86BuiltinTileDuplicate(CallExpr *TheCall, ArrayRef<int> ArgNums);
bool CheckX86BuiltinTileRangeAndDuplicate(CallExpr *TheCall,
ArrayRef<int> ArgNums);
bool CheckX86BuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
bool CheckPPCBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
bool CheckAMDGCNBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckRISCVLMUL(CallExpr *TheCall, unsigned ArgNum);
bool CheckRISCVBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
bool CheckIntelFPGARegBuiltinFunctionCall(unsigned BuiltinID, CallExpr *Call);
bool CheckIntelFPGAMemBuiltinFunctionCall(CallExpr *Call);
bool SemaBuiltinVAStart(unsigned BuiltinID, CallExpr *TheCall);
bool SemaBuiltinVAStartARMMicrosoft(CallExpr *Call);
bool SemaBuiltinUnorderedCompare(CallExpr *TheCall);
bool SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs);
bool SemaBuiltinComplex(CallExpr *TheCall);
bool SemaBuiltinVSX(CallExpr *TheCall);
bool SemaBuiltinOSLogFormat(CallExpr *TheCall);
bool SemaValueIsRunOfOnes(CallExpr *TheCall, unsigned ArgNum);
public:
// Used by C++ template instantiation.
ExprResult SemaBuiltinShuffleVector(CallExpr *TheCall);
ExprResult SemaConvertVectorExpr(Expr *E, TypeSourceInfo *TInfo,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
private:
bool SemaBuiltinPrefetch(CallExpr *TheCall);
bool SemaBuiltinAllocaWithAlign(CallExpr *TheCall);
bool SemaBuiltinArithmeticFence(CallExpr *TheCall);
bool SemaBuiltinAssume(CallExpr *TheCall);
bool SemaBuiltinAssumeAligned(CallExpr *TheCall);
bool SemaBuiltinLongjmp(CallExpr *TheCall);
bool SemaBuiltinSetjmp(CallExpr *TheCall);
ExprResult SemaBuiltinAtomicOverloaded(ExprResult TheCallResult);
ExprResult SemaBuiltinNontemporalOverloaded(ExprResult TheCallResult);
ExprResult SemaAtomicOpsOverloaded(ExprResult TheCallResult,
AtomicExpr::AtomicOp Op);
ExprResult SemaBuiltinOperatorNewDeleteOverloaded(ExprResult TheCallResult,
bool IsDelete);
bool SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum,
llvm::APSInt &Result);
bool SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum, int Low,
int High, bool RangeIsError = true);
bool SemaBuiltinConstantArgMultiple(CallExpr *TheCall, int ArgNum,
unsigned Multiple);
bool SemaBuiltinConstantArgPower2(CallExpr *TheCall, int ArgNum);
bool SemaBuiltinConstantArgShiftedByte(CallExpr *TheCall, int ArgNum,
unsigned ArgBits);
bool SemaBuiltinConstantArgShiftedByteOrXXFF(CallExpr *TheCall, int ArgNum,
unsigned ArgBits);
bool SemaBuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall,
int ArgNum, unsigned ExpectedFieldNum,
bool AllowName);
bool SemaBuiltinARMMemoryTaggingCall(unsigned BuiltinID, CallExpr *TheCall);
bool SemaBuiltinPPCMMACall(CallExpr *TheCall, unsigned BuiltinID,
const char *TypeDesc);
bool CheckPPCMMAType(QualType Type, SourceLocation TypeLoc);
bool SemaBuiltinElementwiseMath(CallExpr *TheCall);
bool PrepareBuiltinElementwiseMathOneArgCall(CallExpr *TheCall);
bool SemaBuiltinReduceMath(CallExpr *TheCall);
// Matrix builtin handling.
ExprResult SemaBuiltinMatrixTranspose(CallExpr *TheCall,
ExprResult CallResult);
ExprResult SemaBuiltinMatrixColumnMajorLoad(CallExpr *TheCall,
ExprResult CallResult);
ExprResult SemaBuiltinMatrixColumnMajorStore(CallExpr *TheCall,
ExprResult CallResult);
public:
enum FormatStringType {
FST_Scanf,
FST_Printf,
FST_NSString,
FST_Strftime,
FST_Strfmon,
FST_Kprintf,
FST_FreeBSDKPrintf,
FST_OSTrace,
FST_OSLog,
FST_Unknown
};
static FormatStringType GetFormatStringType(const FormatAttr *Format);
bool FormatStringHasSArg(const StringLiteral *FExpr);
static bool GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx);
private:
bool CheckFormatArguments(const FormatAttr *Format,
ArrayRef<const Expr *> Args,
bool IsCXXMember,
VariadicCallType CallType,
SourceLocation Loc, SourceRange Range,
llvm::SmallBitVector &CheckedVarArgs);
bool CheckFormatArguments(ArrayRef<const Expr *> Args,
bool HasVAListArg, unsigned format_idx,
unsigned firstDataArg, FormatStringType Type,
VariadicCallType CallType,
SourceLocation Loc, SourceRange range,
llvm::SmallBitVector &CheckedVarArgs);
void CheckAbsoluteValueFunction(const CallExpr *Call,
const FunctionDecl *FDecl);
void CheckMaxUnsignedZero(const CallExpr *Call, const FunctionDecl *FDecl);
void CheckMemaccessArguments(const CallExpr *Call,
unsigned BId,
IdentifierInfo *FnName);
void CheckStrlcpycatArguments(const CallExpr *Call,
IdentifierInfo *FnName);
void CheckStrncatArguments(const CallExpr *Call,
IdentifierInfo *FnName);
void CheckFreeArguments(const CallExpr *E);
void CheckReturnValExpr(Expr *RetValExp, QualType lhsType,
SourceLocation ReturnLoc,
bool isObjCMethod = false,
const AttrVec *Attrs = nullptr,
const FunctionDecl *FD = nullptr);
public:
void CheckFloatComparison(SourceLocation Loc, Expr *LHS, Expr *RHS);
private:
void CheckImplicitConversions(Expr *E, SourceLocation CC = SourceLocation());
void CheckBoolLikeConversion(Expr *E, SourceLocation CC);
void CheckForIntOverflow(Expr *E);
void CheckUnsequencedOperations(const Expr *E);
/// Perform semantic checks on a completed expression. This will either
/// be a full-expression or a default argument expression.
void CheckCompletedExpr(Expr *E, SourceLocation CheckLoc = SourceLocation(),
bool IsConstexpr = false);
void CheckBitFieldInitialization(SourceLocation InitLoc, FieldDecl *Field,
Expr *Init);
/// Check whether a field shadows a field inherited from a base class.
void CheckShadowInheritedFields(const SourceLocation &Loc,
DeclarationName FieldName,
const CXXRecordDecl *RD,
bool DeclIsField = true);
/// Check if the given expression contains 'break' or 'continue'
/// statement that produces control flow different from GCC.
void CheckBreakContinueBinding(Expr *E);
/// Check whether the receiver is a mutable ObjC container which
/// attempts to add itself into the container.
void CheckObjCCircularContainer(ObjCMessageExpr *Message);
void CheckTCBEnforcement(const CallExpr *TheCall, const FunctionDecl *Callee);
void AnalyzeDeleteExprMismatch(const CXXDeleteExpr *DE);
void AnalyzeDeleteExprMismatch(FieldDecl *Field, SourceLocation DeleteLoc,
bool DeleteWasArrayForm);
public:
/// Register a magic integral constant to be used as a type tag.
void RegisterTypeTagForDatatype(const IdentifierInfo *ArgumentKind,
uint64_t MagicValue, QualType Type,
bool LayoutCompatible, bool MustBeNull);
struct TypeTagData {
TypeTagData() {}
TypeTagData(QualType Type, bool LayoutCompatible, bool MustBeNull) :
Type(Type), LayoutCompatible(LayoutCompatible),
MustBeNull(MustBeNull)
{}
QualType Type;
/// If true, \c Type should be compared with other expression's types for
/// layout-compatibility.
unsigned LayoutCompatible : 1;
unsigned MustBeNull : 1;
};
/// A pair of ArgumentKind identifier and magic value. This uniquely
/// identifies the magic value.
typedef std::pair<const IdentifierInfo *, uint64_t> TypeTagMagicValue;
private:
/// A map from magic value to type information.
std::unique_ptr<llvm::DenseMap<TypeTagMagicValue, TypeTagData>>
TypeTagForDatatypeMagicValues;
/// Perform checks on a call of a function with argument_with_type_tag
/// or pointer_with_type_tag attributes.
void CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr,
const ArrayRef<const Expr *> ExprArgs,
SourceLocation CallSiteLoc);
/// Check if we are taking the address of a packed field
/// as this may be a problem if the pointer value is dereferenced.
void CheckAddressOfPackedMember(Expr *rhs);
/// The parser's current scope.
///
/// The parser maintains this state here.
Scope *CurScope;
mutable IdentifierInfo *Ident_super;
mutable IdentifierInfo *Ident___float128;
/// Nullability type specifiers.
IdentifierInfo *Ident__Nonnull = nullptr;
IdentifierInfo *Ident__Nullable = nullptr;
IdentifierInfo *Ident__Nullable_result = nullptr;
IdentifierInfo *Ident__Null_unspecified = nullptr;
IdentifierInfo *Ident_NSError = nullptr;
/// The handler for the FileChanged preprocessor events.
///
/// Used for diagnostics that implement custom semantic analysis for #include
/// directives, like -Wpragma-pack.
sema::SemaPPCallbacks *SemaPPCallbackHandler;
protected:
friend class Parser;
friend class InitializationSequence;
friend class ASTReader;
friend class ASTDeclReader;
friend class ASTWriter;
public:
/// Retrieve the keyword associated with the given nullability kind.
IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability);
/// The struct behind the CFErrorRef pointer.
RecordDecl *CFError = nullptr;
bool isCFError(RecordDecl *D);
/// Retrieve the identifier "NSError".
IdentifierInfo *getNSErrorIdent();
/// Retrieve the parser's current scope.
///
/// This routine must only be used when it is certain that semantic analysis
/// and the parser are in precisely the same context, which is not the case
/// when, e.g., we are performing any kind of template instantiation.
/// Therefore, the only safe places to use this scope are in the parser
/// itself and in routines directly invoked from the parser and *never* from
/// template substitution or instantiation.
Scope *getCurScope() const { return CurScope; }
void incrementMSManglingNumber() const {
return CurScope->incrementMSManglingNumber();
}
IdentifierInfo *getSuperIdentifier() const;
IdentifierInfo *getFloat128Identifier() const;
Decl *getObjCDeclContext() const;
DeclContext *getCurLexicalContext() const {
return OriginalLexicalContext ? OriginalLexicalContext : CurContext;
}
const DeclContext *getCurObjCLexicalContext() const {
const DeclContext *DC = getCurLexicalContext();
// A category implicitly has the attribute of the interface.
if (const ObjCCategoryDecl *CatD = dyn_cast<ObjCCategoryDecl>(DC))
DC = CatD->getClassInterface();
return DC;
}
/// Determine the number of levels of enclosing template parameters. This is
/// only usable while parsing. Note that this does not include dependent
/// contexts in which no template parameters have yet been declared, such as
/// in a terse function template or generic lambda before the first 'auto' is
/// encountered.
unsigned getTemplateDepth(Scope *S) const;
/// To be used for checking whether the number of arguments being passed
/// to a function exceeds the number of parameters expected for it.
static bool TooManyArguments(size_t NumParams, size_t NumArgs,
bool PartialOverloading = false) {
// We check whether we're just after a comma in code-completion.
if (NumArgs > 0 && PartialOverloading)
return NumArgs + 1 > NumParams; // If so, we view the comma as an extra argument.
return NumArgs > NumParams;
}
// Emitting members of dllexported classes is delayed until the class
// (including field initializers) is fully parsed.
SmallVector<CXXRecordDecl*, 4> DelayedDllExportClasses;
SmallVector<CXXMethodDecl*, 4> DelayedDllExportMemberFunctions;
private:
int ParsingClassDepth = 0;
class SavePendingParsedClassStateRAII {
public:
SavePendingParsedClassStateRAII(Sema &S) : S(S) { swapSavedState(); }
~SavePendingParsedClassStateRAII() {
assert(S.DelayedOverridingExceptionSpecChecks.empty() &&
"there shouldn't be any pending delayed exception spec checks");
assert(S.DelayedEquivalentExceptionSpecChecks.empty() &&
"there shouldn't be any pending delayed exception spec checks");
swapSavedState();
}
private:
Sema &S;
decltype(DelayedOverridingExceptionSpecChecks)
SavedOverridingExceptionSpecChecks;
decltype(DelayedEquivalentExceptionSpecChecks)
SavedEquivalentExceptionSpecChecks;
void swapSavedState() {
SavedOverridingExceptionSpecChecks.swap(
S.DelayedOverridingExceptionSpecChecks);
SavedEquivalentExceptionSpecChecks.swap(
S.DelayedEquivalentExceptionSpecChecks);
}
};
/// Helper class that collects misaligned member designations and
/// their location info for delayed diagnostics.
struct MisalignedMember {
Expr *E;
RecordDecl *RD;
ValueDecl *MD;
CharUnits Alignment;
MisalignedMember() : E(), RD(), MD(), Alignment() {}
MisalignedMember(Expr *E, RecordDecl *RD, ValueDecl *MD,
CharUnits Alignment)
: E(E), RD(RD), MD(MD), Alignment(Alignment) {}
explicit MisalignedMember(Expr *E)
: MisalignedMember(E, nullptr, nullptr, CharUnits()) {}
bool operator==(const MisalignedMember &m) { return this->E == m.E; }
};
/// Small set of gathered accesses to potentially misaligned members
/// due to the packed attribute.
SmallVector<MisalignedMember, 4> MisalignedMembers;
/// Adds an expression to the set of gathered misaligned members.
void AddPotentialMisalignedMembers(Expr *E, RecordDecl *RD, ValueDecl *MD,
CharUnits Alignment);
public:
/// Diagnoses the current set of gathered accesses. This typically
/// happens at full expression level. The set is cleared after emitting the
/// diagnostics.
void DiagnoseMisalignedMembers();
/// This function checks if the expression is in the set of potentially
/// misaligned members and it is converted to some pointer type T with lower
/// or equal alignment requirements. If so it removes it. This is used when
/// we do not want to diagnose such misaligned access (e.g. in conversions to
/// void*).
void DiscardMisalignedMemberAddress(const Type *T, Expr *E);
/// This function calls Action when it determines that E designates a
/// misaligned member due to the packed attribute. This is used to emit
/// local diagnostics like in reference binding.
void RefersToMemberWithReducedAlignment(
Expr *E,
llvm::function_ref<void(Expr *, RecordDecl *, FieldDecl *, CharUnits)>
Action);
/// Describes the reason a calling convention specification was ignored, used
/// for diagnostics.
enum class CallingConventionIgnoredReason {
ForThisTarget = 0,
VariadicFunction,
ConstructorDestructor,
BuiltinFunction
};
private:
// We store SYCL kernels here and handle them separately -- which is a hack.
// FIXME: It would be best to refactor this.
llvm::SetVector<Decl *> SyclDeviceDecls;
// SYCL integration header instance for the current compilation unit this
// Sema is associated with.
std::unique_ptr<SYCLIntegrationHeader> SyclIntHeader;
std::unique_ptr<SYCLIntegrationFooter> SyclIntFooter;
// We need to store the list of the sycl_kernel functions and their associated
// generated OpenCL Kernels so we can go back and re-name these after the
// fact.
llvm::SmallVector<std::pair<const FunctionDecl *, FunctionDecl *>>
SyclKernelsToOpenCLKernels;
// Used to suppress diagnostics during kernel construction, since these were
// already emitted earlier. Diagnosing during kernel emission also skips the
// useful notes that show where the kernel was called.
bool DiagnosingSYCLKernel = false;
public:
void addSyclOpenCLKernel(const FunctionDecl *SyclKernel,
FunctionDecl *OpenCLKernel) {
SyclKernelsToOpenCLKernels.emplace_back(SyclKernel, OpenCLKernel);
}
void addSyclDeviceDecl(Decl *d) { SyclDeviceDecls.insert(d); }
llvm::SetVector<Decl *> &syclDeviceDecls() { return SyclDeviceDecls; }
/// Lazily creates and returns SYCL integration header instance.
SYCLIntegrationHeader &getSyclIntegrationHeader() {
if (SyclIntHeader == nullptr)
SyclIntHeader = std::make_unique<SYCLIntegrationHeader>(*this);
return *SyclIntHeader.get();
}
SYCLIntegrationFooter &getSyclIntegrationFooter() {
if (SyclIntFooter == nullptr)
SyclIntFooter = std::make_unique<SYCLIntegrationFooter>(*this);
return *SyclIntFooter.get();
}
void addSyclVarDecl(VarDecl *VD) {
if (LangOpts.SYCLIsDevice && !LangOpts.SYCLIntFooter.empty())
getSyclIntegrationFooter().addVarDecl(VD);
}
enum SYCLRestrictKind {
KernelGlobalVariable,
KernelRTTI,
KernelNonConstStaticDataVariable,
KernelCallVirtualFunction,
KernelUseExceptions,
KernelCallRecursiveFunction,
KernelCallFunctionPointer,
KernelAllocateStorage,
KernelUseAssembly,
KernelCallDllimportFunction,
KernelCallVariadicFunction,
KernelCallUndefinedFunction,
KernelConstStaticVariable
};
bool isKnownGoodSYCLDecl(const Decl *D);
void checkSYCLDeviceVarDecl(VarDecl *Var);
void copySYCLKernelAttrs(const CXXRecordDecl *KernelObj);
void ConstructOpenCLKernel(FunctionDecl *KernelCallerFunc, MangleContext &MC);
void SetSYCLKernelNames();
void MarkDevices();
/// Get the number of fields or captures within the parsed type.
ExprResult ActOnSYCLBuiltinNumFieldsExpr(ParsedType PT);
ExprResult BuildSYCLBuiltinNumFieldsExpr(SourceLocation Loc,
QualType SourceTy);
/// Get a value based on the type of the given field number so that callers
/// can wrap it in a decltype() to get the actual type of the field.
ExprResult ActOnSYCLBuiltinFieldTypeExpr(ParsedType PT, Expr *Idx);
ExprResult BuildSYCLBuiltinFieldTypeExpr(SourceLocation Loc,
QualType SourceTy, Expr *Idx);
/// Get the number of base classes within the parsed type.
ExprResult ActOnSYCLBuiltinNumBasesExpr(ParsedType PT);
ExprResult BuildSYCLBuiltinNumBasesExpr(SourceLocation Loc,
QualType SourceTy);
/// Get a value based on the type of the given base number so that callers
/// can wrap it in a decltype() to get the actual type of the base class.
ExprResult ActOnSYCLBuiltinBaseTypeExpr(ParsedType PT, Expr *Idx);
ExprResult BuildSYCLBuiltinBaseTypeExpr(SourceLocation Loc, QualType SourceTy,
Expr *Idx);
/// Emit a diagnostic about the given attribute having a deprecated name, and
/// also emit a fixit hint to generate the new attribute name.
void DiagnoseDeprecatedAttribute(const ParsedAttr &A, StringRef NewScope,
StringRef NewName);
/// Diagnoses an attribute in the 'intelfpga' namespace and suggests using
/// the attribute in the 'intel' namespace instead.
void CheckDeprecatedSYCLAttributeSpelling(const ParsedAttr &A,
StringRef NewName = "");
/// Creates a SemaDiagnosticBuilder that emits the diagnostic if the current
/// context is "used as device code".
///
/// - If CurLexicalContext is a kernel function or it is known that the
/// function will be emitted for the device, emits the diagnostics
/// immediately.
/// - If CurLexicalContext is a function and we are compiling
/// for the device, but we don't know that this function will be codegen'ed
/// for device yet, creates a diagnostic which is emitted if and when we
/// realize that the function will be codegen'ed.
///
/// Example usage:
///
/// Diagnose __float128 type usage only from SYCL device code if the current
/// target doesn't support it
/// if (!S.Context.getTargetInfo().hasFloat128Type() &&
/// S.getLangOpts().SYCLIsDevice)
/// SYCLDiagIfDeviceCode(Loc, diag::err_type_unsupported) << "__float128";
SemaDiagnosticBuilder SYCLDiagIfDeviceCode(
SourceLocation Loc, unsigned DiagID,
DeviceDiagnosticReason Reason = DeviceDiagnosticReason::Sycl |
DeviceDiagnosticReason::Esimd);
/// Check whether we're allowed to call Callee from the current context.
///
/// - If the call is never allowed in a semantically-correct program
/// emits an error and returns false.
///
/// - If the call is allowed in semantically-correct programs, but only if
/// it's never codegen'ed, creates a deferred diagnostic to be emitted if
/// and when the caller is codegen'ed, and returns true.
///
/// - Otherwise, returns true without emitting any diagnostics.
///
/// Adds Callee to DeviceCallGraph if we don't know if its caller will be
/// codegen'ed yet.
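///
/// Example usage (hypothetical call site; the guard and names are
/// illustrative, not taken from this file):
///
///   if (!checkSYCLDeviceFunction(Loc, FD))
///     return ExprError();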
bool checkSYCLDeviceFunction(SourceLocation Loc, FunctionDecl *Callee);
/// Finishes analysis of the deferred function calls that may not be
/// properly declared for device compilation.
void finalizeSYCLDelayedAnalysis(const FunctionDecl *Caller,
const FunctionDecl *Callee,
SourceLocation Loc,
DeviceDiagnosticReason Reason);
/// Tells whether the given variable is a SYCL explicit SIMD extension's
/// "private global" variable, i.e., a global variable in the private address space.
bool isSYCLEsimdPrivateGlobal(VarDecl *VDecl) {
return getLangOpts().SYCLIsDevice && VDecl->hasAttr<SYCLSimdAttr>() &&
VDecl->hasGlobalStorage() &&
(VDecl->getType().getAddressSpace() == LangAS::sycl_private);
}
};
inline Expr *checkMaxWorkSizeAttrExpr(Sema &S, const AttributeCommonInfo &CI,
Expr *E) {
assert(E && "Attribute must have an argument.");
if (!E->isInstantiationDependent()) {
llvm::APSInt ArgVal;
ExprResult ICE = S.VerifyIntegerConstantExpression(E, &ArgVal);
if (ICE.isInvalid())
return nullptr;
E = ICE.get();
if (ArgVal.isNegative()) {
S.Diag(E->getExprLoc(),
diag::warn_attribute_requires_non_negative_integer_argument)
<< E->getType() << S.Context.UnsignedLongLongTy
<< E->getSourceRange();
return E;
}
unsigned Val = ArgVal.getZExtValue();
if (Val == 0) {
S.Diag(E->getExprLoc(), diag::err_attribute_argument_is_zero)
<< CI << E->getSourceRange();
return nullptr;
}
}
return E;
}
template <typename WorkGroupAttrType>
void Sema::addIntelTripleArgAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *XDimExpr, Expr *YDimExpr,
Expr *ZDimExpr) {
assert((XDimExpr && YDimExpr && ZDimExpr) &&
"argument has unexpected null value");
// Accept template arguments for now as they depend on something else.
// We'll get to check them when they eventually get instantiated.
if (!XDimExpr->isValueDependent() && !YDimExpr->isValueDependent() &&
!ZDimExpr->isValueDependent()) {
// Save ConstantExpr in semantic attribute
XDimExpr = checkMaxWorkSizeAttrExpr(*this, CI, XDimExpr);
YDimExpr = checkMaxWorkSizeAttrExpr(*this, CI, YDimExpr);
ZDimExpr = checkMaxWorkSizeAttrExpr(*this, CI, ZDimExpr);
if (!XDimExpr || !YDimExpr || !ZDimExpr)
return;
}
D->addAttr(::new (Context)
WorkGroupAttrType(Context, CI, XDimExpr, YDimExpr, ZDimExpr));
}
/// RAII object that enters a new expression evaluation context.
class EnterExpressionEvaluationContext {
Sema &Actions;
bool Entered = true;
public:
EnterExpressionEvaluationContext(
Sema &Actions, Sema::ExpressionEvaluationContext NewContext,
Decl *LambdaContextDecl = nullptr,
Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext =
Sema::ExpressionEvaluationContextRecord::EK_Other,
bool ShouldEnter = true)
: Actions(Actions), Entered(ShouldEnter) {
if (Entered)
Actions.PushExpressionEvaluationContext(NewContext, LambdaContextDecl,
ExprContext);
}
EnterExpressionEvaluationContext(
Sema &Actions, Sema::ExpressionEvaluationContext NewContext,
Sema::ReuseLambdaContextDecl_t,
Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext =
Sema::ExpressionEvaluationContextRecord::EK_Other)
: Actions(Actions) {
Actions.PushExpressionEvaluationContext(
NewContext, Sema::ReuseLambdaContextDecl, ExprContext);
}
enum InitListTag { InitList };
EnterExpressionEvaluationContext(Sema &Actions, InitListTag,
bool ShouldEnter = true)
: Actions(Actions), Entered(false) {
// In C++11 onwards, narrowing checks are performed on the contents of
// braced-init-lists, even when they occur within unevaluated operands.
// Therefore we still need to instantiate constexpr functions used in such
// a context.
if (ShouldEnter && Actions.isUnevaluatedContext() &&
Actions.getLangOpts().CPlusPlus11) {
Actions.PushExpressionEvaluationContext(
Sema::ExpressionEvaluationContext::UnevaluatedList);
Entered = true;
}
}
~EnterExpressionEvaluationContext() {
if (Entered)
Actions.PopExpressionEvaluationContext();
}
};
DeductionFailureInfo
MakeDeductionFailureInfo(ASTContext &Context, Sema::TemplateDeductionResult TDK,
sema::TemplateDeductionInfo &Info);
/// Contains a late templated function.
/// Will be parsed at the end of the translation unit, used by Sema & Parser.
struct LateParsedTemplate {
CachedTokens Toks;
/// The template function declaration to be late parsed.
Decl *D;
};
template <>
void Sema::PragmaStack<Sema::AlignPackInfo>::Act(SourceLocation PragmaLocation,
PragmaMsStackAction Action,
llvm::StringRef StackSlotLabel,
AlignPackInfo Value);
} // end namespace clang
namespace llvm {
// Hash a FunctionDeclAndLoc by looking at both its FunctionDecl and its
// SourceLocation.
template <> struct DenseMapInfo<clang::Sema::FunctionDeclAndLoc> {
using FunctionDeclAndLoc = clang::Sema::FunctionDeclAndLoc;
using FDBaseInfo = DenseMapInfo<clang::CanonicalDeclPtr<clang::FunctionDecl>>;
static FunctionDeclAndLoc getEmptyKey() {
return {FDBaseInfo::getEmptyKey(), clang::SourceLocation()};
}
static FunctionDeclAndLoc getTombstoneKey() {
return {FDBaseInfo::getTombstoneKey(), clang::SourceLocation()};
}
static unsigned getHashValue(const FunctionDeclAndLoc &FDL) {
return hash_combine(FDBaseInfo::getHashValue(FDL.FD),
FDL.Loc.getHashValue());
}
static bool isEqual(const FunctionDeclAndLoc &LHS,
const FunctionDeclAndLoc &RHS) {
return LHS.FD == RHS.FD && LHS.Loc == RHS.Loc;
}
};
} // namespace llvm
#endif
|
GB_unop__identity_uint8_uint64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_uint8_uint64)
// op(A') function: GB (_unop_tran__identity_uint8_uint64)
// C type: uint8_t
// A type: uint64_t
// cast: uint8_t cij = (uint8_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
uint64_t
#define GB_CTYPE \
uint8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
uint8_t z = (uint8_t) aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
uint64_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
uint8_t z = (uint8_t) aij ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_UINT8 || GxB_NO_UINT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__identity_uint8_uint64)
(
uint8_t *Cx, // Cx and Ax may be aliased
const uint64_t *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
uint64_t aij = Ax [p] ;
uint8_t z = (uint8_t) aij ;
Cx [p] = z ;
}
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
uint64_t aij = Ax [p] ;
uint8_t z = (uint8_t) aij ;
Cx [p] = z ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_tran__identity_uint8_uint64)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
calc.c | /**
* Copyright (c) 2005,2010 Yury Mishin <yury.mishin@gmail.com>
* See the file COPYING for copying permission.
*
* $Id$
*/
#include <string.h>
#include <math.h>
#include <omp.h>
#include "common.h"
#include "vector.h"
#include "kernel.h"
#include "eos.h"
#include "calc.h"
/**********************************************************/
/* The function to calculate the particles' pressures */
static void (*CalcPressByEOS) ( void);
/* The function to calculate the kernel's gradient */
static int (*GetGradKernel) ( float *Grad, float *Rij);
/* 'leap-frog' integration scheme */
static void LeapfrogIntegration ( void);
/**********************************************************/
/* External force field */
static float ExternalForce[] = { 0.0f, -0.00981f, 0.0f };
/* D factor to calculate repulsive Lennard-Jones forces */
static float LenJonD = 10.0f;
/* P1 power to calculate repulsive Lennard-Jones forces */
static float LenJonP1 = 4.0f;
/* P2 power to calculate repulsive Lennard-Jones forces */
static float LenJonP2 = 2.0f;
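/* Sketch of the boundary force these parameters define (see DoCalcStep):
 * with q = r0/|Rij| (r0 = ParticlesDistrib), each boundary particle adds
 *   a = LenJonD * (q^LenJonP1 - q^LenJonP2) * Rij / |Rij|^2
 * to a particle's acceleration, and only when q > 1 (repulsion only). */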
/**********************************************************/
/* Equation of state to calculate pressures */
char EOSType[20];
/* Kernel to use in the calculations */
char KernelType[20];
/* Kernel smoothing length */
float SmoothR;
/* Rest density */
float Density0;
/* Speed of sound */
float SOS;
/* Alpha factor to calculate viscosity */
float ViscAlpha;
/* Beta factor to calculate viscosity */
float ViscBeta;
/* Time step of integration */
float TimeStep;
/**********************************************************/
/**
* Initialize calculation module - choose appropriate
* kernel(s), equation of state, etc. according to
* parameters in the scene description file.
*/
void
InitCalc( void)
{
int i;
/* Choose the kernel for calculations */
for ( i = 0; i < KernelsNum; i++ )
{
/* Search for the required kernel */
if ( strcmp( KernelType, Kernels[i].Name) )
continue;
/* Initialize the kernel */
Kernels[i].Init();
/* The function to calculate the kernel's gradient */
GetGradKernel = Kernels[i].GetGrad;
break;
}
/* Choose the equation of state for calculations */
for ( i = 0; i < StateEquationsNum; i++ )
{
/* Search for the required EOS */
if ( strcmp(EOSType, StateEquations[i].Name) )
continue;
/* The function to calculate the particles' pressures */
CalcPressByEOS = StateEquations[i].CalcPress;
break;
}
return;
} /* InitCalc */
/**********************************************************/
/**
* Do one calculation step.
*/
void
DoCalcStep( void)
{
float GradKernel[3];
float PressTerm;
float ViscTerm;
float ViscNu;
float Vij[3];
float Rij[3];
float tmp1, tmp2;
int i, j, d;
/* Nu factor to calculate viscosity */
ViscNu = 0.01f * SmoothR * SmoothR;
/* Calculate the particles' pressures */
CalcPressByEOS();
/* Calculate the rates of change of velocities and the
* rates of change of densities for all the particles
* J.J.Monaghan, Simulating Free Surface Flows with SPH,
* J.Comput.Phys., 110, 399-406, 1994.
*/
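/* The viscosity term below follows Monaghan's artificial viscosity,
 *   Pi_ij = (-alpha*c*mu + beta*mu^2) / rho_avg,
 *   mu = h*(Vij.Rij) / (|Rij|^2 + nu),
 * applied only to approaching particles (Vij.Rij < 0); in the code,
 * 2/(Dens_i + Dens_j) plays the role of 1/rho_avg. */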
#pragma omp parallel for schedule(dynamic,50) private(GradKernel,PressTerm,ViscTerm,Vij,Rij,tmp1,tmp2,j,d)
for ( i = 0; i < ParticlesNumber; i++ )
{
/* Take into account the external force field */
memcpy( Particles[i].Accel, ExternalForce, sizeof(ExternalForce));
Particles[i].DervDens = 0.0f;
/* Calculate forces between smoothing particles
* and update the rate of change of the density */
for ( j = 0; j < ParticlesNumber; j++ )
{
if ( j == i )
continue;
VectorSubstraction( Rij, Particles[i].Pos, Particles[j].Pos);
/* Get the kernel's gradient at the point Rij */
if ( GetGradKernel( GradKernel, Rij) )
continue;
/* Take into account the viscosity of the medium */
VectorSubstraction( Vij, Particles[i].Vel, Particles[j].Vel);
tmp1 = VectorInnerproduct( Rij, Vij);
if ( tmp1 < 0.0f )
{
tmp2 = VectorInnerproduct( Rij, Rij);
tmp1 = SmoothR * tmp1 / (tmp2 + ViscNu);
ViscTerm = 2.0f * tmp1 * (-ViscAlpha * SOS + ViscBeta * tmp1) /
(Particles[i].Dens + Particles[j].Dens);
}
else
{
ViscTerm = 0.0f;
}
/* Take into account the difference of the particles' pressures */
PressTerm = Particles[i].Press / (Particles[i].Dens * Particles[i].Dens) +
Particles[j].Press / (Particles[j].Dens * Particles[j].Dens);
/* Update the acceleration of the particle */
tmp1 = Particles[j].Mass * (PressTerm + ViscTerm);
for ( d = 0; d < Dimension; d++ )
Particles[i].Accel[d] -= tmp1 * GradKernel[d];
/* Update the rate of change of the density for the particle */
tmp1 = VectorInnerproduct( Vij, GradKernel);
Particles[i].DervDens += Particles[j].Mass * tmp1;
}
/* Calculate the Lennard-Jones forces between
* the particle and the boundary particles */
for ( j = 0; j < BParticlesNumber; j++ )
{
VectorSubstraction( Rij, Particles[i].Pos, BParticles[j].Pos);
tmp1 = VectorInnerproduct( Rij, Rij);
tmp2 = ParticlesDistrib / sqrt( tmp1);
/* Only repulsive forces are taken into account */
if ( tmp2 > 1.0f )
{
tmp1 = (pow( tmp2, LenJonP1) - pow( tmp2, LenJonP2)) *
LenJonD / tmp1;
for ( d = 0; d < Dimension; d++ )
Particles[i].Accel[d] += Rij[d] * tmp1;
}
}
}
/* Time integration */
LeapfrogIntegration();
return;
} /* DoCalcStep */
/**********************************************************/
/**
* 'leap-frog' integration scheme
* M.P.Allen and D.J.Tildesley, Computer Simulation
* of Liquids, Oxford Univ.Press, 1987.
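*
* Sketch of the update implemented below (dt = TimeStep):
*   v(t+dt/2) = v(t-dt/2) + a(t)*dt        (IvalVel)
*   x(t+dt)   = x(t) + v(t+dt/2)*dt        (Pos)
*   v(t+dt)  ~= v(t+dt/2) + a(t)*dt/2      (Vel)
* The density is advanced the same way using DervDens.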
*/
static void
LeapfrogIntegration( void)
{
int i;
int d;
/* Calculate new positions, velocities and densities for all the particles */
for ( i = 0; i < ParticlesNumber; i++ )
{
for ( d = 0; d < Dimension; d++ )
{
/* New interval velocity (t+dt/2) */
Particles[i].IvalVel[d] += Particles[i].Accel[d] * TimeStep;
/* New position (t+dt) */
Particles[i].Pos[d] += Particles[i].IvalVel[d] * TimeStep;
/* New velocity (t+dt) */
Particles[i].Vel[d] = Particles[i].IvalVel[d] +
Particles[i].Accel[d] * TimeStep / 2.0f;
}
/* New interval density (t+dt/2) */
Particles[i].IvalDens += Particles[i].DervDens * TimeStep;
/* New density (t+dt) */
Particles[i].Dens = Particles[i].IvalDens +
Particles[i].DervDens * TimeStep / 2.0f;
}
return;
} /* LeapfrogIntegration */
|
residual_based_adjoint_bossak_scheme.h | //    |  /           |
//    ' /   __| _` | __|  _ \   __|
//    . \  |   (   | |   (   |\__ `
//   _|\_\_|  \__,_|\__|\___/ ____/
//                   Multi-Physics
//
// License: BSD License
// Kratos default license: kratos/license.txt
//
// Main authors:
//
#if !defined(KRATOS_RESIDUAL_BASED_ADJOINT_BOSSAK_SCHEME_H_INCLUDED)
#define KRATOS_RESIDUAL_BASED_ADJOINT_BOSSAK_SCHEME_H_INCLUDED
// System includes
#include <vector>
#include <string>
#include <unordered_set>
#include <functional>
// External includes
// Project includes
#include "includes/define.h"
#include "includes/checks.h"
#include "includes/kratos_parameters.h"
#include "solving_strategies/schemes/scheme.h"
#include "response_functions/adjoint_response_function.h"
#include "utilities/variable_utils.h"
#include "utilities/indirect_scalar.h"
#include "utilities/adjoint_extensions.h"
namespace Kratos
{
///@name Kratos Classes
///@{
/// A scheme for dynamic adjoint equations, using Bossak time integration.
/**
* It can be used for either first- or second-order time derivatives. Elements
* and conditions must provide a specialization of AdjointExtensions via their
* data value container, which allows the scheme to operate independently of
* the variable arrangements in the element or condition.
*/
template <class TSparseSpace, class TDenseSpace>
class ResidualBasedAdjointBossakScheme : public Scheme<TSparseSpace, TDenseSpace>
{
public:
///@name Type Definitions
///@{
KRATOS_CLASS_POINTER_DEFINITION(ResidualBasedAdjointBossakScheme);
typedef Scheme<TSparseSpace, TDenseSpace> BaseType;
typedef typename BaseType::TSystemMatrixType SystemMatrixType;
typedef typename BaseType::TSystemVectorType SystemVectorType;
typedef typename BaseType::LocalSystemVectorType LocalSystemVectorType;
typedef typename BaseType::LocalSystemMatrixType LocalSystemMatrixType;
typedef typename BaseType::DofsArrayType DofsArrayType;
///@}
///@name Life Cycle
///@{
/// Constructor.
ResidualBasedAdjointBossakScheme(
Parameters Settings,
AdjointResponseFunction::Pointer pResponseFunction
) : mpResponseFunction(pResponseFunction)
{
Parameters default_parameters(R"({
"name" : "adjoint_bossak",
"scheme_type" : "bossak",
"alpha_bossak" : -0.3
})");
Settings.ValidateAndAssignDefaults(default_parameters);
mBossak.Alpha = Settings["alpha_bossak"].GetDouble();
}
/// Destructor.
~ResidualBasedAdjointBossakScheme() override
{
}
///@}
///@name Operators
///@{
///@}
///@name Operations
///@{
void Initialize(ModelPart& rModelPart) override
{
KRATOS_TRY;
BaseType::Initialize(rModelPart);
// Allocate auxiliary memory.
int num_threads = OpenMPUtils::GetNumThreads();
mLeftHandSide.resize(num_threads);
mResponseGradient.resize(num_threads);
mFirstDerivsLHS.resize(num_threads);
mFirstDerivsResponseGradient.resize(num_threads);
mSecondDerivsLHS.resize(num_threads);
mSecondDerivsResponseGradient.resize(num_threads);
mAdjointValuesVector.resize(num_threads);
mAdjointIndirectVector2.resize(num_threads);
mAdjointIndirectVector3.resize(num_threads);
mAuxAdjointIndirectVector1.resize(num_threads);
InitializeNodeNeighbourCount(rModelPart.Nodes());
rModelPart.GetProcessInfo()[BOSSAK_ALPHA] = mBossak.Alpha;
KRATOS_CATCH("");
}
void InitializeSolutionStep(ModelPart& rModelPart,
SystemMatrixType& rA,
SystemVectorType& rDx,
SystemVectorType& rb) override
{
KRATOS_TRY;
BaseType::InitializeSolutionStep(rModelPart, rA, rDx, rb);
const auto& r_current_process_info = rModelPart.GetProcessInfo();
mBossak = CalculateBossakConstants(mBossak.Alpha, GetTimeStep(r_current_process_info));
this->CalculateNodeNeighbourCount(rModelPart);
KRATOS_CATCH("");
}
void FinalizeSolutionStep(ModelPart& rModelPart,
SystemMatrixType& rA,
SystemVectorType& rDx,
SystemVectorType& rb) override
{
KRATOS_TRY;
BaseType::FinalizeSolutionStep(rModelPart, rA, rDx, rb);
this->UpdateAuxiliaryVariable(rModelPart);
KRATOS_CATCH("");
}
void Update(ModelPart& rModelPart,
DofsArrayType& rDofSet,
SystemMatrixType& rA,
SystemVectorType& rDx,
SystemVectorType& rb) override
{
KRATOS_TRY;
// Update degrees of freedom: adjoint variables associated to the
// residual of the physical problem.
this->mpDofUpdater->UpdateDofs(rDofSet, rDx);
// Update adjoint variables associated to time integration.
this->UpdateTimeSchemeAdjoints(rModelPart);
KRATOS_CATCH("");
}
void CalculateSystemContributions(Element::Pointer pCurrentElement,
LocalSystemMatrixType& rLHS_Contribution,
LocalSystemVectorType& rRHS_Contribution,
Element::EquationIdVectorType& rEquationId,
ProcessInfo& rCurrentProcessInfo) override
{
KRATOS_TRY;
auto& r_current_element = *pCurrentElement;
const auto k = OpenMPUtils::ThisThread();
r_current_element.GetValuesVector(mAdjointValuesVector[k]);
const auto local_size = mAdjointValuesVector[k].size();
if (rRHS_Contribution.size() != local_size)
{
rRHS_Contribution.resize(local_size, false);
}
if (rLHS_Contribution.size1() != local_size || rLHS_Contribution.size2() != local_size)
{
rLHS_Contribution.resize(local_size, local_size, false);
}
this->CheckAndResizeThreadStorage(local_size);
this->CalculateGradientContributions(r_current_element, rLHS_Contribution,
rRHS_Contribution, rCurrentProcessInfo);
this->CalculateFirstDerivativeContributions(
r_current_element, rLHS_Contribution, rRHS_Contribution, rCurrentProcessInfo);
this->CalculateSecondDerivativeContributions(
r_current_element, rLHS_Contribution, rRHS_Contribution, rCurrentProcessInfo);
this->CalculatePreviousTimeStepContributions(
r_current_element, rLHS_Contribution, rRHS_Contribution, rCurrentProcessInfo);
this->CalculateResidualLocalContributions(
r_current_element, rLHS_Contribution, rRHS_Contribution, rCurrentProcessInfo);
r_current_element.EquationIdVector(rEquationId, rCurrentProcessInfo);
KRATOS_CATCH("");
}
void Calculate_LHS_Contribution(Element::Pointer pCurrentElement,
LocalSystemMatrixType& rLHS_Contribution,
Element::EquationIdVectorType& rEquationId,
ProcessInfo& rCurrentProcessInfo) override
{
KRATOS_TRY;
LocalSystemVectorType RHS_Contribution;
CalculateSystemContributions(pCurrentElement, rLHS_Contribution, RHS_Contribution,
rEquationId, rCurrentProcessInfo);
KRATOS_CATCH("");
}
void Condition_CalculateSystemContributions(Condition::Pointer pCurrentCondition,
LocalSystemMatrixType& rLHS_Contribution,
LocalSystemVectorType& rRHS_Contribution,
Condition::EquationIdVectorType& rEquationId,
ProcessInfo& rCurrentProcessInfo) override
{
KRATOS_TRY;
// NOT TESTED !!!
pCurrentCondition->CalculateLocalSystem(
rLHS_Contribution, rRHS_Contribution, rCurrentProcessInfo);
KRATOS_CATCH("");
}
void Condition_Calculate_LHS_Contribution(Condition::Pointer pCurrentCondition,
LocalSystemMatrixType& rLHS_Contribution,
Condition::EquationIdVectorType& rEquationId,
ProcessInfo& rCurrentProcessInfo) override
{
KRATOS_TRY;
LocalSystemVectorType RHS_Contribution;
Condition_CalculateSystemContributions(pCurrentCondition,
rLHS_Contribution, RHS_Contribution,
rEquationId, rCurrentProcessInfo);
KRATOS_CATCH("");
}
void Clear() override
{
this->mpDofUpdater->Clear();
}
///@}
///@name Access
///@{
///@}
///@name Inquiry
///@{
///@}
///@name Input and output
///@{
/// Turn back information as a string.
std::string Info() const override
{
return "ResidualBasedAdjointBossakScheme";
}
/// Print information about this object.
void PrintInfo(std::ostream& rOStream) const override
{
rOStream << Info();
}
/// Print object's data.
void PrintData(std::ostream& rOStream) const override
{
rOStream << Info();
}
///@}
///@name Friends
///@{
///@}
protected:
///@name Protected static Member Variables
///@{
///@}
///@name Protected member Variables
///@{
///@}
///@name Protected Operators
///@{
///@}
///@name Protected Operations
///@{
///@}
///@name Protected Access
///@{
///@}
///@name Protected Inquiry
///@{
///@}
///@name Protected LifeCycle
///@{
///@}
private:
struct BossakConstants
{
double Alpha;
double Beta;
double Gamma;
double C0;
double C1;
double C2;
double C3;
double C4;
double C5;
double C6;
double C7;
};
///@name Static Member Variables
///@{
///@}
///@name Member Variables
///@{
BossakConstants mBossak;
typename TSparseSpace::DofUpdaterPointerType mpDofUpdater =
TSparseSpace::CreateDofUpdater();
AdjointResponseFunction::Pointer mpResponseFunction;
std::vector<LocalSystemMatrixType> mLeftHandSide;
std::vector<LocalSystemVectorType> mResponseGradient;
std::vector<LocalSystemMatrixType> mFirstDerivsLHS;
std::vector<LocalSystemVectorType> mFirstDerivsResponseGradient;
std::vector<LocalSystemMatrixType> mSecondDerivsLHS;
std::vector<LocalSystemVectorType> mSecondDerivsResponseGradient;
std::vector<LocalSystemVectorType> mAdjointValuesVector;
std::vector<std::vector<IndirectScalar<double>>> mAdjointIndirectVector2;
std::vector<std::vector<IndirectScalar<double>>> mAdjointIndirectVector3;
std::vector<std::vector<IndirectScalar<double>>> mAuxAdjointIndirectVector1;
///@}
///@name Private Operators
///@{
///@}
///@name Private Operations
///@{
void CalculateGradientContributions(Element& rCurrentElement,
LocalSystemMatrixType& rLHS_Contribution,
LocalSystemVectorType& rRHS_Contribution,
ProcessInfo& rCurrentProcessInfo)
{
int k = OpenMPUtils::ThisThread();
rCurrentElement.CalculateLeftHandSide(mLeftHandSide[k], rCurrentProcessInfo);
this->mpResponseFunction->CalculateGradient(
rCurrentElement, mLeftHandSide[k], mResponseGradient[k], rCurrentProcessInfo);
noalias(rLHS_Contribution) = mLeftHandSide[k];
noalias(rRHS_Contribution) = -1. * mResponseGradient[k];
}
void CalculateFirstDerivativeContributions(Element& rCurrentElement,
LocalSystemMatrixType& rLHS_Contribution,
LocalSystemVectorType& rRHS_Contribution,
ProcessInfo& rCurrentProcessInfo)
{
int k = OpenMPUtils::ThisThread();
rCurrentElement.CalculateFirstDerivativesLHS(mFirstDerivsLHS[k], rCurrentProcessInfo);
mpResponseFunction->CalculateFirstDerivativesGradient(
rCurrentElement, mFirstDerivsLHS[k],
mFirstDerivsResponseGradient[k], rCurrentProcessInfo);
noalias(rLHS_Contribution) += mBossak.C6 * mFirstDerivsLHS[k];
noalias(rRHS_Contribution) -=
mBossak.C6 * mFirstDerivsResponseGradient[k];
}
void CalculateSecondDerivativeContributions(Element& rCurrentElement,
LocalSystemMatrixType& rLHS_Contribution,
LocalSystemVectorType& rRHS_Contribution,
ProcessInfo& rCurrentProcessInfo)
{
int k = OpenMPUtils::ThisThread();
auto& r_response_function = *(this->mpResponseFunction);
rCurrentElement.CalculateSecondDerivativesLHS(mSecondDerivsLHS[k], rCurrentProcessInfo);
mSecondDerivsLHS[k] *= (1.0 - mBossak.Alpha);
r_response_function.CalculateSecondDerivativesGradient(
rCurrentElement, mSecondDerivsLHS[k],
mSecondDerivsResponseGradient[k], rCurrentProcessInfo);
noalias(rLHS_Contribution) += mBossak.C7 * mSecondDerivsLHS[k];
noalias(rRHS_Contribution) -=
mBossak.C7 * mSecondDerivsResponseGradient[k];
}
void CalculatePreviousTimeStepContributions(Element& rCurrentElement,
LocalSystemMatrixType& rLHS_Contribution,
LocalSystemVectorType& rRHS_Contribution,
ProcessInfo& rCurrentProcessInfo)
{
const auto& r_geometry = rCurrentElement.GetGeometry();
const auto k = OpenMPUtils::ThisThread();
auto& r_extensions = *rCurrentElement.GetValue(ADJOINT_EXTENSIONS);
unsigned local_index = 0;
for (unsigned i_node = 0; i_node < r_geometry.PointsNumber(); ++i_node)
{
auto& r_node = r_geometry[i_node];
r_extensions.GetFirstDerivativesVector(i_node, mAdjointIndirectVector2[k], 1);
r_extensions.GetSecondDerivativesVector(i_node, mAdjointIndirectVector3[k], 1);
r_extensions.GetAuxiliaryVector(i_node, mAuxAdjointIndirectVector1[k], 1);
const double weight = 1.0 / r_node.GetValue(NUMBER_OF_NEIGHBOUR_ELEMENTS);
for (unsigned d = 0; d < mAdjointIndirectVector2[k].size(); ++d)
{
rRHS_Contribution[local_index] +=
weight *
(mBossak.C7 * mAuxAdjointIndirectVector1[k][d] +
mBossak.C4 * mAdjointIndirectVector2[k][d] +
mBossak.C5 * mAdjointIndirectVector3[k][d]);
++local_index;
}
}
}
void CalculateResidualLocalContributions(Element& rCurrentElement,
LocalSystemMatrixType& rLHS_Contribution,
LocalSystemVectorType& rRHS_Contribution,
ProcessInfo& rCurrentProcessInfo)
{
int k = OpenMPUtils::ThisThread();
auto& r_residual_adjoint = mAdjointValuesVector[k];
rCurrentElement.GetValuesVector(r_residual_adjoint);
noalias(rRHS_Contribution) -= prod(rLHS_Contribution, r_residual_adjoint);
}
void InitializeNodeNeighbourCount(ModelPart::NodesContainerType& rNodes)
{
// This loop should not be omp parallel
// The operation is not threadsafe if the value is uninitialized
for (auto& r_node : rNodes)
r_node.SetValue(NUMBER_OF_NEIGHBOUR_ELEMENTS, 0.0);
}
void CalculateNodeNeighbourCount(ModelPart& rModelPart)
{
// Calculate number of neighbour elements for each node.
const int num_nodes = rModelPart.NumberOfNodes();
#pragma omp parallel for
for (int i = 0; i < num_nodes; ++i)
{
Node<3>& r_node = *(rModelPart.Nodes().begin() + i);
r_node.SetValue(NUMBER_OF_NEIGHBOUR_ELEMENTS, 0.0);
}
const int num_elements = rModelPart.NumberOfElements();
#pragma omp parallel for
for (int i = 0; i < num_elements; ++i)
{
Element& r_element = *(rModelPart.Elements().begin() + i);
Geometry<Node<3>>& r_geometry = r_element.GetGeometry();
for (unsigned j = 0; j < r_geometry.PointsNumber(); ++j)
{
double& r_num_neighbour =
r_geometry[j].GetValue(NUMBER_OF_NEIGHBOUR_ELEMENTS);
#pragma omp atomic
r_num_neighbour += 1.0;
}
}
rModelPart.GetCommunicator().AssembleNonHistoricalData(NUMBER_OF_NEIGHBOUR_ELEMENTS);
}
void UpdateTimeSchemeAdjoints(ModelPart& rModelPart)
{
KRATOS_TRY;
auto lambda2_vars = GatherVariables(
rModelPart.Elements(), [](const AdjointExtensions& rExtensions,
std::vector<const VariableData*>& rVec) {
rExtensions.GetFirstDerivativesVariables(rVec);
});
auto lambda3_vars = GatherVariables(
rModelPart.Elements(), [](const AdjointExtensions& rExtensions,
std::vector<const VariableData*>& rVec) {
return rExtensions.GetSecondDerivativesVariables(rVec);
});
SetToZero_AdjointVars(lambda2_vars, rModelPart.Nodes());
SetToZero_AdjointVars(lambda3_vars, rModelPart.Nodes());
const int number_of_elements = rModelPart.NumberOfElements();
ProcessInfo& r_process_info = rModelPart.GetProcessInfo();
Vector adjoint2_aux, adjoint3_aux;
std::vector<IndirectScalar<double>> adjoint2_old, adjoint3_old;
#pragma omp parallel for private(adjoint2_aux, adjoint3_aux, adjoint2_old, adjoint3_old)
for (int i = 0; i < number_of_elements; ++i)
{
Element& r_element = *(rModelPart.ElementsBegin() + i);
const int k = OpenMPUtils::ThisThread();
r_element.GetValuesVector(mAdjointValuesVector[k]);
this->CheckAndResizeThreadStorage(mAdjointValuesVector[k].size());
r_element.CalculateFirstDerivativesLHS(mFirstDerivsLHS[k], r_process_info);
this->mpResponseFunction->CalculateFirstDerivativesGradient(
r_element, mFirstDerivsLHS[k], mFirstDerivsResponseGradient[k], r_process_info);
r_element.CalculateSecondDerivativesLHS(mSecondDerivsLHS[k], r_process_info);
mSecondDerivsLHS[k] *= (1.0 - mBossak.Alpha);
this->mpResponseFunction->CalculateSecondDerivativesGradient(
r_element, mSecondDerivsLHS[k], mSecondDerivsResponseGradient[k], r_process_info);
if (adjoint2_aux.size() != mFirstDerivsResponseGradient[k].size())
adjoint2_aux.resize(mFirstDerivsResponseGradient[k].size(), false);
noalias(adjoint2_aux) = -mFirstDerivsResponseGradient[k] -
prod(mFirstDerivsLHS[k], mAdjointValuesVector[k]);
if (adjoint3_aux.size() != mSecondDerivsResponseGradient[k].size())
adjoint3_aux.resize(mSecondDerivsResponseGradient[k].size(), false);
noalias(adjoint3_aux) = -mSecondDerivsResponseGradient[k] -
prod(mSecondDerivsLHS[k], mAdjointValuesVector[k]);
auto& r_extensions = *r_element.GetValue(ADJOINT_EXTENSIONS);
// Assemble the contributions to the corresponding nodal unknowns.
unsigned local_index = 0;
Geometry<Node<3>>& r_geometry = r_element.GetGeometry();
for (unsigned i_node = 0; i_node < r_geometry.PointsNumber(); ++i_node)
{
r_extensions.GetFirstDerivativesVector(
i_node, mAdjointIndirectVector2[k], 0);
r_extensions.GetSecondDerivativesVector(
i_node, mAdjointIndirectVector3[k], 0);
r_extensions.GetFirstDerivativesVector(i_node, adjoint2_old, 1);
r_extensions.GetSecondDerivativesVector(i_node, adjoint3_old, 1);
r_extensions.GetAuxiliaryVector(i_node, mAuxAdjointIndirectVector1[k], 1);
Node<3>& r_node = r_geometry[i_node];
const double weight = 1.0 / r_node.GetValue(NUMBER_OF_NEIGHBOUR_ELEMENTS);
r_node.SetLock();
for (unsigned d = 0; d < mAdjointIndirectVector2[k].size(); ++d)
{
mAdjointIndirectVector2[k][d] += adjoint2_aux[local_index];
mAdjointIndirectVector2[k][d] += mBossak.C0 * weight * adjoint2_old[d];
mAdjointIndirectVector2[k][d] += mBossak.C1 * weight * adjoint3_old[d];
mAdjointIndirectVector3[k][d] += adjoint3_aux[local_index];
mAdjointIndirectVector3[k][d] += mBossak.C2 * weight * adjoint2_old[d];
mAdjointIndirectVector3[k][d] += mBossak.C3 * weight * adjoint3_old[d];
mAdjointIndirectVector3[k][d] +=
weight * mAuxAdjointIndirectVector1[k][d];
++local_index;
}
r_node.UnSetLock();
}
}
// Finalize global assembly
Assemble_AdjointVars(lambda2_vars, rModelPart.GetCommunicator());
Assemble_AdjointVars(lambda3_vars, rModelPart.GetCommunicator());
KRATOS_CATCH("");
}
void UpdateAuxiliaryVariable(ModelPart& rModelPart)
{
KRATOS_TRY;
auto aux_vars = GatherVariables(
rModelPart.Elements(), [](const AdjointExtensions& rExtensions,
std::vector<const VariableData*>& rOut) {
return rExtensions.GetAuxiliaryVariables(rOut);
});
SetToZero_AdjointVars(aux_vars, rModelPart.Nodes());
// Loop over elements to assemble the remaining terms
const int number_of_elements = rModelPart.NumberOfElements();
ProcessInfo& r_process_info = rModelPart.GetProcessInfo();
Vector aux_adjoint_vector;
#pragma omp parallel for private(aux_adjoint_vector)
for (int i = 0; i < number_of_elements; ++i)
{
Element& r_element = *(rModelPart.ElementsBegin() + i);
const int k = OpenMPUtils::ThisThread();
r_element.GetValuesVector(mAdjointValuesVector[k]);
this->CheckAndResizeThreadStorage(mAdjointValuesVector[k].size());
r_element.CalculateSecondDerivativesLHS(mSecondDerivsLHS[k], r_process_info);
mSecondDerivsLHS[k] *= mBossak.Alpha;
this->mpResponseFunction->CalculateSecondDerivativesGradient(
r_element, mSecondDerivsLHS[k], mSecondDerivsResponseGradient[k], r_process_info);
if (aux_adjoint_vector.size() != mSecondDerivsLHS[k].size1())
aux_adjoint_vector.resize(mSecondDerivsLHS[k].size1(), false);
noalias(aux_adjoint_vector) =
prod(mSecondDerivsLHS[k], mAdjointValuesVector[k]) +
mSecondDerivsResponseGradient[k];
auto& r_extensions = *r_element.GetValue(ADJOINT_EXTENSIONS);
// Assemble the contributions to the corresponding nodal unknowns.
unsigned local_index = 0;
Geometry<Node<3>>& r_geometry = r_element.GetGeometry();
for (unsigned i_node = 0; i_node < r_geometry.PointsNumber(); ++i_node)
{
Node<3>& r_node = r_geometry[i_node];
r_extensions.GetAuxiliaryVector(i_node, mAuxAdjointIndirectVector1[k], 0);
r_node.SetLock();
for (unsigned d = 0; d < mAuxAdjointIndirectVector1[k].size(); ++d)
{
mAuxAdjointIndirectVector1[k][d] -= aux_adjoint_vector[local_index];
++local_index;
}
r_node.UnSetLock();
}
}
// Finalize global assembly
Assemble_AdjointVars(aux_vars, rModelPart.GetCommunicator());
KRATOS_CATCH("");
}
void CheckAndResizeThreadStorage(unsigned SystemSize)
{
const int k = OpenMPUtils::ThisThread();
if (mLeftHandSide[k].size1() != SystemSize || mLeftHandSide[k].size2() != SystemSize)
{
mLeftHandSide[k].resize(SystemSize, SystemSize, false);
}
if (mFirstDerivsLHS[k].size1() != SystemSize || mFirstDerivsLHS[k].size2() != SystemSize)
{
mFirstDerivsLHS[k].resize(SystemSize, SystemSize, false);
}
if (mSecondDerivsLHS[k].size1() != SystemSize || mSecondDerivsLHS[k].size2() != SystemSize)
{
mSecondDerivsLHS[k].resize(SystemSize, SystemSize, false);
}
if (mResponseGradient[k].size() != SystemSize)
{
mResponseGradient[k].resize(SystemSize, false);
}
if (mFirstDerivsResponseGradient[k].size() != SystemSize)
{
mFirstDerivsResponseGradient[k].resize(SystemSize, false);
}
if (mSecondDerivsResponseGradient[k].size() != SystemSize)
{
mSecondDerivsResponseGradient[k].resize(SystemSize, false);
}
}
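// Computes the standard Bossak-alpha (Newmark-based) coefficients used by
// the adjoint scheme: Beta = (1 - Alpha)^2 / 4, Gamma = 1/2 - Alpha, and
// the C0..C7 combinations of Beta, Gamma and the time step that appear in
// the discrete adjoint updates above.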
static BossakConstants CalculateBossakConstants(double Alpha, double DeltaTime)
{
BossakConstants bc;
bc.Alpha = Alpha;
bc.Beta = 0.25 * (1.0 - bc.Alpha) * (1.0 - bc.Alpha);
bc.Gamma = 0.5 - bc.Alpha;
bc.C0 = 1.0 - bc.Gamma / bc.Beta;
bc.C1 = -1.0 / (bc.Beta * DeltaTime);
bc.C2 = (1.0 - 0.5 * bc.Gamma / bc.Beta) * DeltaTime;
bc.C3 = (1.0 - 0.5 / bc.Beta);
bc.C4 = (bc.Beta - bc.Gamma * (bc.Gamma + 0.5)) / (DeltaTime * bc.Beta * bc.Beta);
bc.C5 = -1.0 * (bc.Gamma + 0.5) / (DeltaTime * DeltaTime * bc.Beta * bc.Beta);
bc.C6 = bc.Gamma / (bc.Beta * DeltaTime);
bc.C7 = 1.0 / (DeltaTime * DeltaTime * bc.Beta);
return bc;
}
static double GetTimeStep(const ProcessInfo& rCurrentProcessInfo)
{
const ProcessInfo& r_last_process_info =
rCurrentProcessInfo.GetPreviousSolutionStepInfo(1);
// Note: solution is backwards in time, but we still want a positive
// time step
// (it is the time step in the "forward" Bossak scheme).
double time_step =
r_last_process_info.GetValue(TIME) - rCurrentProcessInfo.GetValue(TIME);
KRATOS_ERROR_IF(time_step <= 0.0)
<< "Backwards in time solution is not decreasing time from last "
"step."
<< std::endl;
return time_step;
}
struct Hash
{
std::size_t operator()(const VariableData* const& p) const
{
return p->Key();
}
};
struct Pred
{
bool operator()(const VariableData* const l, const VariableData* const r) const
{
return *l == *r;
}
};
// Gathers variables needed for assembly.
static std::vector<const VariableData*> GatherVariables(
const ModelPart::ElementsContainerType& rElements,
std::function<void(const AdjointExtensions&, std::vector<const VariableData*>&)> GetLocalVars)
{
KRATOS_TRY;
const int num_threads = OpenMPUtils::GetNumThreads();
std::vector<const VariableData*> local_vars;
std::vector<std::unordered_set<const VariableData*, Hash, Pred>> thread_vars(num_threads);
#pragma omp parallel for private(local_vars)
for (int i = 0; i < static_cast<int>(rElements.size()); ++i)
{
auto& r_element = *(rElements.begin() + i);
GetLocalVars(*r_element.GetValue(ADJOINT_EXTENSIONS), local_vars);
const int k = OpenMPUtils::ThisThread();
thread_vars[k].insert(local_vars.begin(), local_vars.end());
}
std::unordered_set<const VariableData*, Hash, Pred> all_vars;
for (int i = 0; i < num_threads; ++i)
{
all_vars.insert(thread_vars[i].begin(), thread_vars[i].end());
}
return std::vector<const VariableData*>{all_vars.begin(), all_vars.end()};
KRATOS_CATCH("");
}
static void SetToZero_AdjointVars(const std::vector<const VariableData*>& rVariables,
ModelPart::NodesContainerType& rNodes)
{
KRATOS_TRY;
for (auto p_variable_data : rVariables)
{
if (KratosComponents<Variable<array_1d<double, 3>>>::Has(
p_variable_data->Name()))
{
const auto& r_variable =
KratosComponents<Variable<array_1d<double, 3>>>::Get(
p_variable_data->Name());
VariableUtils().SetHistoricalVariableToZero(r_variable, rNodes);
}
else if (KratosComponents<Variable<double>>::Has(p_variable_data->Name()))
{
const auto& r_variable =
KratosComponents<Variable<double>>::Get(p_variable_data->Name());
VariableUtils().SetHistoricalVariableToZero(r_variable, rNodes);
}
else
{
KRATOS_ERROR << "Variable \"" << p_variable_data->Name()
<< "\" not found!\n";
}
}
KRATOS_CATCH("");
}
static void Assemble_AdjointVars(const std::vector<const VariableData*>& rVariables,
Communicator& rComm)
{
KRATOS_TRY;
for (auto p_variable_data : rVariables)
{
if (KratosComponents<Variable<array_1d<double, 3>>>::Has(
p_variable_data->Name()))
{
const auto& r_variable =
KratosComponents<Variable<array_1d<double, 3>>>::Get(
p_variable_data->Name());
rComm.AssembleCurrentData(r_variable);
}
else if (KratosComponents<Variable<double>>::Has(p_variable_data->Name()))
{
const auto& r_variable =
KratosComponents<Variable<double>>::Get(p_variable_data->Name());
rComm.AssembleCurrentData(r_variable);
}
else
{
KRATOS_ERROR << "Variable \"" << p_variable_data->Name()
<< "\" not found!\n";
}
}
KRATOS_CATCH("");
}
///@}
///@name Private Access
///@{
///@}
///@name Private Inquiry
///@{
///@}
///@name Un accessible methods
///@{
///@}
}; /* Class ResidualBasedAdjointBossakScheme */
///@}
///@name Type Definitions
///@{
///@}
} /* namespace Kratos.*/
#endif /* KRATOS_RESIDUAL_BASED_ADJOINT_BOSSAK_SCHEME_H_INCLUDED defined */
|
convolution_1x1_pack1to4_int8.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
static void conv1x1s1_sgemm_pack1to4_int8_msa(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Option& opt)
{
int w = bottom_blob.w;
int h = bottom_blob.h;
const int size = w * h;
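// A 1x1 stride-1 convolution is a plain matrix multiply, so no im2col
// expansion is needed: reinterpret the w*h pixels as a single row and
// hand the blob straight to the packed int8 sgemm kernel.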
Mat bottom_im2col = bottom_blob;
bottom_im2col.w = size;
bottom_im2col.h = 1;
im2col_sgemm_pack1to4_int8_msa(bottom_im2col, top_blob, kernel, opt);
}
static void conv1x1s2_sgemm_pack1to4_int8_msa(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Option& opt)
{
int w = bottom_blob.w;
int channels = bottom_blob.c;
size_t elemsize = bottom_blob.elemsize;
int elempack = bottom_blob.elempack;
int outw = top_blob.w;
int outh = top_blob.h;
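// Stride is 2 in both directions: after reading 2*outw samples from a row,
// skip the remainder of that row (w - 2*outw) plus the next, unused row (w).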
const int tailstep = w - 2 * outw + w;
Mat bottom_blob_shrinked;
bottom_blob_shrinked.create(outw, outh, channels, elemsize, elempack, opt.workspace_allocator);
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < channels; p++)
{
const signed char* r0 = bottom_blob.channel(p);
signed char* outptr = bottom_blob_shrinked.channel(p);
for (int i = 0; i < outh; i++)
{
int j = 0;
for (; j + 3 < outw; j += 4)
{
outptr[0] = r0[0];
outptr[1] = r0[2];
outptr[2] = r0[4];
outptr[3] = r0[6];
r0 += 8;
outptr += 4;
}
for (; j + 1 < outw; j += 2)
{
outptr[0] = r0[0];
outptr[1] = r0[2];
r0 += 4;
outptr += 2;
}
for (; j < outw; j++)
{
outptr[0] = r0[0];
r0 += 2;
outptr += 1;
}
r0 += tailstep;
}
}
conv1x1s1_sgemm_pack1to4_int8_msa(bottom_blob_shrinked, top_blob, kernel, opt);
}
|
finalize_tool.c | // RUN: %libomp-compile-and-run | FileCheck %s
// REQUIRES: ompt
#include "callback.h"
int main() {
#pragma omp parallel num_threads(2)
{}
printf("Before ompt_finalize_tool\n");
ompt_finalize_tool();
printf("After ompt_finalize_tool\n");
return 0;
}
// CHECK: 0: NULL_POINTER=[[NULL:.*$]]
// CHECK: {{^}}[[THREAD_ID:[0-9]+]]: ompt_event_thread_begin:
// CHECK-SAME: thread_type=ompt_thread_initial=1
// CHECK: {{^}}[[THREAD_ID]]: ompt_event_parallel_begin
// CHECK: {{^}}[[THREAD_ID]]: ompt_event_parallel_end
// CHECK: {{^}}Before ompt_finalize_tool
// CHECK: {{^}}[[THREAD_ID]]: ompt_event_thread_end: thread_id=[[THREAD_ID]]
// CHECK: 0: ompt_event_runtime_shutdown
// CHECK: {{^}}After ompt_finalize_tool
|
declare_mapper_messages.c | // RUN: %clang_cc1 -verify -fopenmp -ferror-limit 100 %s
// RUN: %clang_cc1 -verify -fopenmp-simd -ferror-limit 100 %s
int temp; // expected-note {{'temp' declared here}}
struct vec { // expected-note {{definition of 'struct vec' is not complete until the closing '}'}}
int len;
#pragma omp declare mapper(id: struct vec v) map(v.len) // expected-error {{incomplete definition of type 'struct vec'}}
double *data;
};
#pragma omp declare mapper // expected-error {{expected '(' after 'declare mapper'}}
#pragma omp declare mapper { // expected-error {{expected '(' after 'declare mapper'}}
#pragma omp declare mapper( // expected-error {{expected a type}} expected-error {{expected declarator on 'omp declare mapper' directive}}
#pragma omp declare mapper(# // expected-error {{expected a type}} expected-error {{expected declarator on 'omp declare mapper' directive}}
#pragma omp declare mapper(struct v // expected-error {{expected declarator on 'omp declare mapper' directive}}
#pragma omp declare mapper(struct vec // expected-error {{expected declarator on 'omp declare mapper' directive}}
#pragma omp declare mapper(S v // expected-error {{unknown type name 'S'}}
#pragma omp declare mapper(struct vec v // expected-error {{expected ')'}} expected-note {{to match this '('}}
#pragma omp declare mapper(aa:struct vec v) // expected-error {{expected at least one clause on '#pragma omp declare mapper' directive}}
#pragma omp declare mapper(bb:struct vec v) private(v) // expected-error {{expected at least one clause on '#pragma omp declare mapper' directive}} // expected-error {{unexpected OpenMP clause 'private' in directive '#pragma omp declare mapper'}}
#pragma omp declare mapper(cc:struct vec v) map(v) ( // expected-warning {{extra tokens at the end of '#pragma omp declare mapper' are ignored}}
#pragma omp declare mapper(++: struct vec v) map(v.len) // expected-error {{illegal OpenMP user-defined mapper identifier}}
#pragma omp declare mapper(id1: struct vec v) map(v.len, temp) // expected-error {{only variable 'v' is allowed in map clauses of this 'omp declare mapper' directive}}
#pragma omp declare mapper(default : struct vec kk) map(kk.data[0:2]) // expected-note {{previous definition is here}}
#pragma omp declare mapper(struct vec v) map(v.len) // expected-error {{redefinition of user-defined mapper for type 'struct vec' with name 'default'}}
#pragma omp declare mapper(int v) map(v) // expected-error {{mapper type must be of struct, union or class type}}
int fun(int arg) {
#pragma omp declare mapper(id: struct vec v) map(v.len)
{
#pragma omp declare mapper(id: struct vec v) map(v.len) // expected-note {{previous definition is here}}
#pragma omp declare mapper(id: struct vec v) map(v.len) // expected-error {{redefinition of user-defined mapper for type 'struct vec' with name 'id'}}
{
#pragma omp declare mapper(id: struct vec v) map(v.len) allocate(v) // expected-error {{unexpected OpenMP clause 'allocate' in directive '#pragma omp declare mapper'}}
struct vec vv, v1;
struct vec arr[10];
double d;
#pragma omp target map(mapper) // expected-error {{use of undeclared identifier 'mapper'}}
{}
#pragma omp target map(mapper:vv) // expected-error {{expected '(' after 'mapper'}}
{}
#pragma omp target map(mapper( :vv) // expected-error {{expected expression}} expected-error {{expected ')'}} expected-warning {{implicit declaration of function 'mapper' is invalid in C99}} expected-note {{to match this '('}}
{}
#pragma omp target map(mapper(aa :vv) // expected-error {{use of undeclared identifier 'aa'}} expected-error {{expected ')'}} expected-warning {{implicit declaration of function 'mapper' is invalid in C99}} expected-note {{to match this '('}}
{}
#pragma omp target map(mapper(ab) :vv) // expected-error {{missing map type}} expected-error {{cannot find a valid user-defined mapper for type 'struct vec' with name 'ab'}}
{}
#pragma omp target map(mapper(ab) :arr[0:2]) // expected-error {{missing map type}} expected-error {{cannot find a valid user-defined mapper for type 'struct vec' with name 'ab'}}
{}
#pragma omp target map(mapper(aa) :vv) // expected-error {{missing map type}}
{}
#pragma omp target map(mapper(aa) to:d) // expected-error {{mapper type must be of struct, union or class type}}
{}
#pragma omp target map(mapper(aa) to:vv) map(close mapper(aa) from:v1) map(mapper(aa) to:arr[0])
{}
#pragma omp target update to(mapper) // expected-error {{expected '(' after 'mapper'}} expected-error {{expected expression}} expected-error {{expected at least one 'to' clause or 'from' clause specified to '#pragma omp target update'}}
#pragma omp target update to(mapper() // expected-error {{illegal OpenMP user-defined mapper identifier}} expected-error {{expected expression}} expected-error {{expected at least one 'to' clause or 'from' clause specified to '#pragma omp target update'}}
#pragma omp target update to(mapper:vv) // expected-error {{expected '(' after 'mapper'}} expected-error {{expected at least one 'to' clause or 'from' clause specified to '#pragma omp target update'}}
#pragma omp target update to(mapper(:vv) // expected-error {{illegal OpenMP user-defined mapper identifier}} expected-error {{expected at least one 'to' clause or 'from' clause specified to '#pragma omp target update'}}
#pragma omp target update to(mapper(aa :vv) // expected-error {{expected ')'}} expected-note {{to match this '('}} expected-error {{expected at least one 'to' clause or 'from' clause specified to '#pragma omp target update'}}
#pragma omp target update to(mapper(ab):vv) // expected-error {{cannot find a valid user-defined mapper for type 'struct vec' with name 'ab'}} expected-error {{expected at least one 'to' clause or 'from' clause specified to '#pragma omp target update'}}
#pragma omp target update to(mapper(ab):arr[0:2]) // expected-error {{cannot find a valid user-defined mapper for type 'struct vec' with name 'ab'}} expected-error {{expected at least one 'to' clause or 'from' clause specified to '#pragma omp target update'}}
#pragma omp target update to(mapper(aa) a:vv) // expected-warning {{missing ':' after ) - ignoring}}
#pragma omp target update to(mapper(aa):d) // expected-error {{mapper type must be of struct, union or class type}} expected-error {{expected at least one 'to' clause or 'from' clause specified to '#pragma omp target update'}}
#pragma omp target update to(mapper(aa):vv) to(mapper(aa):arr[0])
#pragma omp target update from(mapper) // expected-error {{expected '(' after 'mapper'}} expected-error {{expected expression}} expected-error {{expected at least one 'to' clause or 'from' clause specified to '#pragma omp target update'}}
#pragma omp target update from(mapper() // expected-error {{illegal OpenMP user-defined mapper identifier}} expected-error {{expected expression}} expected-error {{expected at least one 'to' clause or 'from' clause specified to '#pragma omp target update'}}
#pragma omp target update from(mapper:vv) // expected-error {{expected '(' after 'mapper'}} expected-error {{expected at least one 'to' clause or 'from' clause specified to '#pragma omp target update'}}
#pragma omp target update from(mapper(:vv) // expected-error {{illegal OpenMP user-defined mapper identifier}} expected-error {{expected at least one 'to' clause or 'from' clause specified to '#pragma omp target update'}}
#pragma omp target update from(mapper(aa :vv) // expected-error {{expected ')'}} expected-note {{to match this '('}} expected-error {{expected at least one 'to' clause or 'from' clause specified to '#pragma omp target update'}}
#pragma omp target update from(mapper(ab):vv) // expected-error {{cannot find a valid user-defined mapper for type 'struct vec' with name 'ab'}} expected-error {{expected at least one 'to' clause or 'from' clause specified to '#pragma omp target update'}}
#pragma omp target update from(mapper(ab):arr[0:2]) // expected-error {{cannot find a valid user-defined mapper for type 'struct vec' with name 'ab'}} expected-error {{expected at least one 'to' clause or 'from' clause specified to '#pragma omp target update'}}
#pragma omp target update from(mapper(aa) a:vv) // expected-warning {{missing ':' after ) - ignoring}}
#pragma omp target update from(mapper(aa):d) // expected-error {{mapper type must be of struct, union or class type}} expected-error {{expected at least one 'to' clause or 'from' clause specified to '#pragma omp target update'}}
#pragma omp target update from(mapper(aa):vv) from(mapper(aa):arr[0])
}
}
return arg;
}
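// For reference (editor's sketch, not part of the original test): a
// well-formed user-defined mapper declaration and a use of it in a map
// clause, following the syntax exercised above, would be:
//   #pragma omp declare mapper(id: struct vec v) map(v.len)
//   #pragma omp target map(mapper(id) to: vv)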
|
MasterEndLink.c | int x;
int main () {
#pragma omp master
{
10;
}
#pragma omp master
{
int x;
}
}
|
problem.sine.c | //------------------------------------------------------------------------------------------------------------------------------
// Samuel Williams
// SWWilliams@lbl.gov
// Lawrence Berkeley National Lab
//------------------------------------------------------------------------------------------------------------------------------
#ifndef M_PI
#define M_PI 3.14159265358979323846 // in case math.h doesn't define it
#endif
void evaluateBeta(double x, double y, double z, double *B, double *Bx, double *By, double *Bz){
double Bmin = 1.0;
double Bmax = 10.0;
double c2 = (Bmax-Bmin)/2; // coefficients to affect this transition
double c1 = (Bmax+Bmin)/2;
double c3 = 10.0; // how sharply (B)eta transitions
double xcenter = 0.50;
double ycenter = 0.50;
double zcenter = 0.50;
// calculate distance from center of the domain (0.5,0.5,0.5)
double r2 = pow((x-xcenter),2) + pow((y-ycenter),2) + pow((z-zcenter),2);
double r2x = 2.0*(x-xcenter);
double r2y = 2.0*(y-ycenter);
double r2z = 2.0*(z-zcenter);
//double r2xx = 2.0;
//double r2yy = 2.0;
//double r2zz = 2.0;
double r = pow(r2,0.5);
double rx = 0.5*r2x*pow(r2,-0.5);
double ry = 0.5*r2y*pow(r2,-0.5);
double rz = 0.5*r2z*pow(r2,-0.5);
//double rxx = 0.5*r2xx*pow(r2,-0.5) - 0.25*r2x*r2x*pow(r2,-1.5);
//double ryy = 0.5*r2yy*pow(r2,-0.5) - 0.25*r2y*r2y*pow(r2,-1.5);
//double rzz = 0.5*r2zz*pow(r2,-0.5) - 0.25*r2z*r2z*pow(r2,-1.5);
//- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
*B = c1+c2*tanh( c3*(r-0.25) );
*Bx = c2*c3*rx*(1-pow(tanh( c3*(r-0.25) ),2));
*By = c2*c3*ry*(1-pow(tanh( c3*(r-0.25) ),2));
*Bz = c2*c3*rz*(1-pow(tanh( c3*(r-0.25) ),2));
}
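// Editor's sketch (not part of the original benchmark): the analytic
// gradient returned by evaluateBeta can be cross-checked against a centered
// finite difference. The helper name and the CHECK_BETA_GRADIENT guard are
// hypothetical additions for illustration only.
#ifdef CHECK_BETA_GRADIENT
#include <stdio.h>
static void checkBetaGradient(double x, double y, double z){
double h = 1.0e-6;
double B, Bx, By, Bz, Bp, Bm, scratch;
evaluateBeta(x, y, z, &B, &Bx, &By, &Bz); // analytic value and gradient
evaluateBeta(x+h, y, z, &Bp, &scratch, &scratch, &scratch);
evaluateBeta(x-h, y, z, &Bm, &scratch, &scratch, &scratch);
printf("Bx=% .6e centered FD=% .6e\n", Bx, (Bp-Bm)/(2.0*h)); // should agree to ~1e-6
}
#endif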
//------------------------------------------------------------------------------------------------------------------------------
void evaluateU(double x, double y, double z, double *U, double *Ux, double *Uy, double *Uz, double *Uxx, double *Uyy, double *Uzz, int isPeriodic){
double c1 = 2.0*M_PI;
double c2 = 6.0*M_PI;
double p = 13; // must be odd(?) and allows up to p-2 order MG
*U = pow(sin(c1*x),p )*pow(sin(c1*y),p)*pow(sin(c1*z),p);
*Ux = c1*p*cos(c1*x)*pow(sin(c1*x),p-1)*pow(sin(c1*y),p)*pow(sin(c1*z),p);
*Uy = c1*p*cos(c1*y)*pow(sin(c1*y),p-1)*pow(sin(c1*x),p)*pow(sin(c1*z),p);
*Uz = c1*p*cos(c1*z)*pow(sin(c1*z),p-1)*pow(sin(c1*x),p)*pow(sin(c1*y),p);
*Uxx = c1*c1*p*( (p-1)*pow(sin(c1*x),p-2)*pow(cos(c1*x),2) - pow(sin(c1*x),p) )*pow(sin(c1*y),p)*pow(sin(c1*z),p);
*Uyy = c1*c1*p*( (p-1)*pow(sin(c1*y),p-2)*pow(cos(c1*y),2) - pow(sin(c1*y),p) )*pow(sin(c1*x),p)*pow(sin(c1*z),p);
*Uzz = c1*c1*p*( (p-1)*pow(sin(c1*z),p-2)*pow(cos(c1*z),2) - pow(sin(c1*z),p) )*pow(sin(c1*x),p)*pow(sin(c1*y),p);
*U += pow(sin(c2*x),p )*pow(sin(c2*y),p)*pow(sin(c2*z),p);
*Ux += c2*p*cos(c2*x)*pow(sin(c2*x),p-1)*pow(sin(c2*y),p)*pow(sin(c2*z),p);
*Uy += c2*p*cos(c2*y)*pow(sin(c2*y),p-1)*pow(sin(c2*x),p)*pow(sin(c2*z),p);
*Uz += c2*p*cos(c2*z)*pow(sin(c2*z),p-1)*pow(sin(c2*x),p)*pow(sin(c2*y),p);
*Uxx += c2*c2*p*( (p-1)*pow(sin(c2*x),p-2)*pow(cos(c2*x),2) - pow(sin(c2*x),p) )*pow(sin(c2*y),p)*pow(sin(c2*z),p);
*Uyy += c2*c2*p*( (p-1)*pow(sin(c2*y),p-2)*pow(cos(c2*y),2) - pow(sin(c2*y),p) )*pow(sin(c2*x),p)*pow(sin(c2*z),p);
*Uzz += c2*c2*p*( (p-1)*pow(sin(c2*z),p-2)*pow(cos(c2*z),2) - pow(sin(c2*z),p) )*pow(sin(c2*x),p)*pow(sin(c2*y),p);
}
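// Note (editor's): evaluateU accepts an isPeriodic flag for interface
// compatibility with other problem definitions, but this sine-based variant
// never references it -- the sin() products are periodic by construction.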
//------------------------------------------------------------------------------------------------------------------------------
void initialize_problem(level_type * level, double hLevel, double a, double b){
level->h = hLevel;
int box;
for(box=0;box<level->num_my_boxes;box++){
int i,j,k;
const int jStride = level->my_boxes[box].jStride;
const int kStride = level->my_boxes[box].kStride;
const int ghosts = level->my_boxes[box].ghosts;
const int dim_i = level->my_boxes[box].dim;
const int dim_j = level->my_boxes[box].dim;
const int dim_k = level->my_boxes[box].dim;
#ifdef _OPENMP
#pragma omp parallel for private(k,j,i) collapse(3)
#endif
for(k=0;k<=dim_k;k++){ // include high face
for(j=0;j<=dim_j;j++){ // include high face
for(i=0;i<=dim_i;i++){ // include high face
//- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
int ijk = (i+ghosts) + (j+ghosts)*jStride + (k+ghosts)*kStride;
double x = hLevel*( (double)(i+level->my_boxes[box].low.i) + 0.5 ); // +0.5 to get to the center of cell
double y = hLevel*( (double)(j+level->my_boxes[box].low.j) + 0.5 );
double z = hLevel*( (double)(k+level->my_boxes[box].low.k) + 0.5 );
double A,B,Bx,By,Bz,Bi,Bj,Bk;
double U,Ux,Uy,Uz,Uxx,Uyy,Uzz;
//- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
A = 1.0;
B = 1.0;
Bx = 0.0;
By = 0.0;
Bz = 0.0;
Bi = 1.0;
Bj = 1.0;
Bk = 1.0;
#ifdef STENCIL_VARIABLE_COEFFICIENT // variable coefficient problem...
evaluateBeta(x-hLevel*0.5,y ,z ,&Bi,&Bx,&By,&Bz); // face-centered value of Beta for beta_i
evaluateBeta(x ,y-hLevel*0.5,z ,&Bj,&Bx,&By,&Bz); // face-centered value of Beta for beta_j
evaluateBeta(x ,y ,z-hLevel*0.5,&Bk,&Bx,&By,&Bz); // face-centered value of Beta for beta_k
evaluateBeta(x ,y ,z ,&B ,&Bx,&By,&Bz); // cell-centered value of Beta
#endif
//- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
evaluateU(x,y,z,&U,&Ux,&Uy,&Uz,&Uxx,&Uyy,&Uzz, (level->boundary_condition.type == BC_PERIODIC) );
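// F below is the right-hand side of the Helmholtz problem
// a*alpha*u - b*div(beta*grad(u)) = f, with div(beta*grad(u)) expanded by the
// product rule into grad(B).grad(U) + B*laplacian(U).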
double F = a*A*U - b*( (Bx*Ux + By*Uy + Bz*Uz) + B*(Uxx + Uyy + Uzz) );
//- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
level->my_boxes[box].vectors[VECTOR_BETA_I][ijk] = Bi;
level->my_boxes[box].vectors[VECTOR_BETA_J][ijk] = Bj;
level->my_boxes[box].vectors[VECTOR_BETA_K][ijk] = Bk;
level->my_boxes[box].vectors[VECTOR_ALPHA ][ijk] = A;
//level->my_boxes[box].vectors[VECTOR_UTRUE ][ijk] = U; // obviated by Richardson analysis
level->my_boxes[box].vectors[VECTOR_F ][ijk] = F;
//- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
}}}
}
}
//------------------------------------------------------------------------------------------------------------------------------
|
GB_unop__one_fp32_fp32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__one_fp32_fp32)
// op(A') function: GB (_unop_tran__one_fp32_fp32)
// C type: float
// A type: float
// cast: ;
// unaryop: cij = 1
#define GB_ATYPE \
float
#define GB_CTYPE \
float
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
;
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = 1 ;
// casting
#define GB_CAST(z, aij) \
; ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
; ; \
/* Cx [pC] = op (cast (aij)) */ \
; ; \
Cx [pC] = 1 ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
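// (the ONE operator writes the constant 1 regardless of aij, so it is not an
// identity op and the memcpy fast path below stays disabled)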
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ONE || GxB_NO_FP32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__one_fp32_fp32)
(
float *Cx, // Cx and Ax may be aliased
const float *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
// TODO: if OP is ONE and uniform-valued matrices are exploited, then
// do this in O(1) time
if (Ab == NULL)
{
#if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
GB_memcpy (Cx, Ax, anz * sizeof (float), nthreads) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
; ;
; ;
Cx [p] = 1 ;
}
#endif
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
; ;
; ;
Cx [p] = 1 ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_tran__one_fp32_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
pr35130.c | /* PR middle-end/35130 */
extern void abort (void);
void
f1 (void)
{
int a[4], k;
void nested (int x)
{
a[x] = 42;
}
for (k = 0; k < 4; k++)
a[k] = 0;
#pragma omp parallel for
for (k = 0; k < 4; k++)
nested (k);
if (a[0] != 42 || a[1] != 42 || a[2] != 42 || a[3] != 42)
abort ();
}
void
f2 (void)
{
int a[4], k;
void nested (void)
{
int l;
void nested2 (int x)
{
a[x] = 42;
}
#pragma omp parallel for
for (l = 0; l < 4; l++)
nested2 (l);
}
for (k = 0; k < 4; k++)
a[k] = 0;
nested ();
if (a[0] != 42 || a[1] != 42 || a[2] != 42 || a[3] != 42)
abort ();
}
void
f3 (void)
{
int a[4], b[4], c[4], k;
void nested (int x)
{
a[x] = b[x] = c[x] = 42;
}
for (k = 0; k < 4; k++)
a[k] = b[k] = c[k] = 0;
nested (0);
#pragma omp parallel
{
#pragma omp single
{
a[1] = 43;
b[1] = 43;
}
#pragma omp parallel
{
#pragma omp single
{
b[2] = 44;
c[2] = 44;
}
}
}
if (a[0] != 42 || a[1] != 43 || a[2] != 0 || a[3] != 0)
abort ();
if (b[0] != 42 || b[1] != 43 || b[2] != 44 || b[3] != 0)
abort ();
if (c[0] != 42 || c[1] != 0 || c[2] != 44 || c[3] != 0)
abort ();
}
void
f4 (void)
{
int a[4], b[4], c[4], k;
void nested ()
{
#pragma omp parallel
{
#pragma omp single
{
a[1] = 43;
b[1] = 43;
}
#pragma omp parallel
{
#pragma omp single
{
b[2] = 44;
c[2] = 44;
}
}
}
}
for (k = 0; k < 4; k++)
a[k] = b[k] = c[k] = k == 0 ? 42 : 0;
nested ();
if (a[0] != 42 || a[1] != 43 || a[2] != 0 || a[3] != 0)
abort ();
if (b[0] != 42 || b[1] != 43 || b[2] != 44 || b[3] != 0)
abort ();
if (c[0] != 42 || c[1] != 0 || c[2] != 44 || c[3] != 0)
abort ();
}
int
main (void)
{
f1 ();
f2 ();
f3 ();
f4 ();
return 0;
}
|
GB_binop__min_uint32.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__min_uint32)
// A.*B function (eWiseMult): GB (_AemultB_08__min_uint32)
// A.*B function (eWiseMult): GB (_AemultB_02__min_uint32)
// A.*B function (eWiseMult): GB (_AemultB_04__min_uint32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__min_uint32)
// A*D function (colscale): GB (_AxD__min_uint32)
// D*A function (rowscale): GB (_DxB__min_uint32)
// C+=B function (dense accum): GB (_Cdense_accumB__min_uint32)
// C+=b function (dense accum): GB (_Cdense_accumb__min_uint32)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__min_uint32)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__min_uint32)
// C=scalar+B GB (_bind1st__min_uint32)
// C=scalar+B' GB (_bind1st_tran__min_uint32)
// C=A+scalar GB (_bind2nd__min_uint32)
// C=A'+scalar GB (_bind2nd_tran__min_uint32)
// C type: uint32_t
// A type: uint32_t
// A pattern? 0
// B type: uint32_t
// B pattern? 0
// BinaryOp: cij = GB_IMIN (aij, bij)
#define GB_ATYPE \
uint32_t
#define GB_BTYPE \
uint32_t
#define GB_CTYPE \
uint32_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint32_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint32_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint32_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = GB_IMIN (x, y) ;
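// (editor's note: GB_IMIN is SuiteSparse's integer-minimum macro,
// conceptually ((x) < (y)) ? (x) : (y); the actual definition lives in GB.h)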
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MIN || GxB_NO_UINT32 || GxB_NO_MIN_UINT32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB (_Cdense_ewise3_accum__min_uint32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
void GB (_Cdense_ewise3_noaccum__min_uint32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__min_uint32)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__min_uint32)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint32_t
uint32_t bwork = (*((uint32_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__min_uint32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t *restrict Cx = (uint32_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__min_uint32)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t *restrict Cx = (uint32_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__min_uint32)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
uint32_t alpha_scalar ;
uint32_t beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((uint32_t *) alpha_scalar_in)) ;
beta_scalar = (*((uint32_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__min_uint32)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__min_uint32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__min_uint32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__min_uint32)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__min_uint32)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t *Cx = (uint32_t *) Cx_output ;
uint32_t x = (*((uint32_t *) x_input)) ;
uint32_t *Bx = (uint32_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
uint32_t bij = GBX (Bx, p, false) ;
Cx [p] = GB_IMIN (x, bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__min_uint32)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
uint32_t *Cx = (uint32_t *) Cx_output ;
uint32_t *Ax = (uint32_t *) Ax_input ;
uint32_t y = (*((uint32_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
uint32_t aij = GBX (Ax, p, false) ;
Cx [p] = GB_IMIN (aij, y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_IMIN (x, aij) ; \
}
GrB_Info GB (_bind1st_tran__min_uint32)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint32_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t x = (*((const uint32_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
uint32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_IMIN (aij, y) ; \
}
GrB_Info GB (_bind2nd_tran__min_uint32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t y = (*((const uint32_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
particle_utilities.h | // | / |
// ' / __| _` | __| _ \ __|
// . \ | ( | | ( |\__ `
// _|\_\_| \__,_|\__|\___/ ____/
// Multi-Physics
//
// License: BSD License
// Kratos default license: kratos/license.txt
//
// Main authors: Author Julio Marti.
//
#if !defined(KRATOS_PARTICLES_UTILITIES_INCLUDED )
#define KRATOS_PARTICLES_UTILITIES_INCLUDED
#define PRESSURE_ON_EULERIAN_MESH
#define USE_FEW_PARTICLES
// System includes
#include <string>
#include <iostream>
#include <algorithm>
// External includes
// Project includes
#include "includes/define.h"
#include "includes/model_part.h"
#include "includes/node.h"
#include "includes/kratos_flags.h"
#include "utilities/geometry_utilities.h"
#include "geometries/tetrahedra_3d_4.h"
#include "pfem_2_application_variables.h"
#include "spatial_containers/spatial_containers.h"
#include "utilities/timer.h"
#include "processes/node_erase_process.h"
#include "utilities/binbased_fast_point_locator.h"
//#include "utilities/enrichment_utilities.h"
#include <boost/timer.hpp>
#include "utilities/timer.h"
#ifdef _OPENMP
#include "omp.h"
#endif
namespace Kratos
{
template< class T, std::size_t dim >
class DistanceCalculator1
{
public:
double operator()(T const& p1, T const& p2)
{
double dist = 0.0;
for (std::size_t i = 0; i < dim; i++)
{
double tmp = p1[i] - p2[i];
dist += tmp*tmp;
}
return dist; // squared distance: avoids taking the square root
}
};
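// Usage sketch (editor's note): the functor returns the *squared* distance,
// e.g. for two 3D points
//   DistanceCalculator1< array_1d<double,3>, 3 > d2;
//   double r2 = d2(p1, p2);   // compare against radius*radius, not radius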
template<std::size_t TDim> class ParticleUtils
{
public:
KRATOS_CLASS_POINTER_DEFINITION(ParticleUtils<TDim>);
void EstimateTime(ModelPart& rEulerianModelPart,const double max_dt)
{
KRATOS_TRY
// KRATOS_ERROR(std::logic_error, "NEGATIVE VALUE OF Time step estimated" , "");
//initialize dt with the maximum allowed value
double /*dt, glob_min_dt,*/ dummy;
// double h, nu;
array_1d<double,3> N = ZeroVector(3);
array_1d<double,3> aux = ZeroVector(3); //dimension = number of nodes
array_1d<double,3> vel = ZeroVector(3); //dimension = number of nodes
BoundedMatrix<double,3,2> DN_DX = ZeroMatrix(3,2);
array_1d<double,2> ms_vel_gauss = ZeroVector(2); //dimension coincides with space dimension
//initialize it with given value
// glob_min_dt=max_dt;
// dt=0.0;
for(ModelPart::ElementsContainerType::iterator im = rEulerianModelPart.ElementsBegin() ; im !=rEulerianModelPart.ElementsEnd() ; ++im)
{
GeometryUtils::CalculateGeometryData(im->GetGeometry(),DN_DX,N,dummy);
double h = sqrt(2.00*dummy);
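// dummy holds the element area returned by CalculateGeometryData; h = sqrt(2*Area)
// is the leg length of a right triangle with that area, used as the
// characteristic element size in the Courant estimate below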
array_1d<double,3> const& v = im->GetGeometry()[0].FastGetSolutionStepValue(VELOCITY);
ms_vel_gauss[0] = v[0];
ms_vel_gauss[1] = v[1];
//direction of the height is stored in the auxiliary vector
for (unsigned int i=1; i<3; i++)
{
array_1d<double,3> const& vi = im->GetGeometry()[i].FastGetSolutionStepValue(VELOCITY);
ms_vel_gauss[0] += vi[0];
ms_vel_gauss[1] += vi[1];
}
ms_vel_gauss *=0.3333;
double norm_u = ms_vel_gauss[0]*ms_vel_gauss[0] + ms_vel_gauss[1]*ms_vel_gauss[1];
norm_u = sqrt(norm_u);
double courant= norm_u * max_dt / h;
double& counter = im->GetValue(POISSON_RATIO);
counter = courant;
}
KRATOS_CATCH("");
}
void VisualizationModelPart(ModelPart& rCompleteModelPart, ModelPart& rEulerianModelPart, ModelPart & rLagrangianModelPart)
{
KRATOS_TRY;
rCompleteModelPart.Elements() = rEulerianModelPart.Elements();
rCompleteModelPart.Nodes() = rEulerianModelPart.Nodes();
unsigned int id;
if(rEulerianModelPart.Nodes().size()!= 0)
id = (rEulerianModelPart.Nodes().end() - 1)->Id() + 1;
else
id = 1;
//preallocate the memory needed
int tot_nodes = rEulerianModelPart.Nodes().size() + rLagrangianModelPart.Nodes().size();
rCompleteModelPart.Nodes().reserve( tot_nodes );
//note that here we renumber the nodes
for (ModelPart::NodesContainerType::iterator node_it = rLagrangianModelPart.NodesBegin();
node_it != rLagrangianModelPart.NodesEnd(); node_it++)
{
node_it->SetId(id++);
rCompleteModelPart.AddNode(*(node_it.base()));
}
KRATOS_CATCH("");
}
void TransferToEulerianMesh_Face_Heat_Flux(ModelPart& rEulerianModelPart, ModelPart & rLagrangianModelPart)
{
KRATOS_TRY
//definitions for spatial search
typedef Node < 3 > PointType;
typedef Node < 3 > ::Pointer PointTypePointer;
typedef std::vector<PointType::Pointer> PointVector;
typedef std::vector<PointType::Pointer>::iterator PointIterator;
typedef std::vector<double> DistanceVector;
typedef std::vector<double>::iterator DistanceIterator;
//creating an auxiliary list for the new nodes
PointVector list_of_nodes;
//*************
// Bucket types
typedef Bucket< TDim, PointType, PointVector, PointTypePointer, PointIterator, DistanceIterator > BucketType;
typedef Tree< KDTreePartition<BucketType> > tree; //Kdtree;
//start timing the construction of the kdtree
boost::timer kdtree_construction;
for (ModelPart::NodesContainerType::iterator node_it = rLagrangianModelPart.NodesBegin();
node_it != rLagrangianModelPart.NodesEnd(); ++node_it)
{
PointTypePointer pnode = *(node_it.base());
//put the nodes of the destination model part in an auxiliary list
list_of_nodes.push_back(pnode);
}
std::cout << "kdt constructin time " << kdtree_construction.elapsed() << std::endl;
//create a spatial database with the list of new nodes
unsigned int bucket_size = 20;
tree nodes_tree(list_of_nodes.begin(), list_of_nodes.end(), bucket_size);
//work arrays
Node < 3 > work_point(0, 0.0, 0.0, 0.0);
unsigned int MaximumNumberOfResults = 10000;
PointVector Results(MaximumNumberOfResults);
DistanceVector SquaredResultsDistances(MaximumNumberOfResults);
if (rEulerianModelPart.NodesBegin()->SolutionStepsDataHas(NODAL_H) == false)
KRATOS_ERROR<<"Add ----NODAL_H---- variable!!!!!! ERROR";
double sigma = 0.0;
if (TDim == 2)
sigma = 10.0 / (7.0 * 3.1415926);
else
sigma = 1.0 / 3.1415926;
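// sigma is the normalization constant of the cubic-spline SPH kernel
// (10/(7*pi) in 2D, 1/pi in 3D); SPHCubicKernel, defined elsewhere in this
// header, weights each neighbour so that the weight decays smoothly to zero
// at the search radius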
for (ModelPart::NodesContainerType::iterator node_it = rEulerianModelPart.NodesBegin(); node_it != rEulerianModelPart.NodesEnd(); node_it++)
{
if( (node_it)->FastGetSolutionStepValue(IS_FREE_SURFACE)==true or (node_it)->FastGetSolutionStepValue(IS_WATER)==1 )
{ //IS_FREE_SURFACE
work_point.X() = node_it->X();
work_point.Y() = node_it->Y();
work_point.Z() = node_it->Z();
double radius = 1.5 * node_it->FastGetSolutionStepValue(NODAL_H);
//find all of the new nodes within the radius
int number_of_points_in_radius;
//check which of the new nodes lie inside the radius of the circumscribed circle
number_of_points_in_radius = nodes_tree.SearchInRadius(work_point, radius, Results.begin(), SquaredResultsDistances.begin(), MaximumNumberOfResults);
if (number_of_points_in_radius > 0)
{
double& temperature = (node_it)->FastGetSolutionStepValue(FACE_HEAT_FLUX);
//double temperature=0.0;
double temperature_aux = 0.0;
double tot_weight = 0.0;
for (int k = 0; k < number_of_points_in_radius; k++)
{
double distance = sqrt(*(SquaredResultsDistances.begin() + k));
double weight = SPHCubicKernel(sigma, distance, radius);
PointIterator it_found = Results.begin() + k;
if((*it_found)->FastGetSolutionStepValue(IS_INTERFACE)==1) //MATERIAL_VARIABLE
{
temperature_aux += weight * (*it_found)->FastGetSolutionStepValue(INCIDENT_RADIATION_FUNCTION);//);//FACE_HEAT_FLUX
tot_weight += weight;
}
}
if(tot_weight>0.0)
{
temperature_aux /= tot_weight;
temperature +=(0.5 * temperature_aux * 1.00); //1.5 //1.25
}
}
}
}
KRATOS_CATCH("")
}
void TransferToEulerianMesh(ModelPart& rEulerianModelPart, ModelPart & rLagrangianModelPart)
{
KRATOS_TRY
//definitions for spatial search
typedef Node < 3 > PointType;
typedef Node < 3 > ::Pointer PointTypePointer;
typedef std::vector<PointType::Pointer> PointVector;
typedef std::vector<PointType::Pointer>::iterator PointIterator;
typedef std::vector<double> DistanceVector;
typedef std::vector<double>::iterator DistanceIterator;
//creating an auxiliary list for the new nodes
PointVector list_of_nodes;
//*************
// Bucket types
typedef Bucket< TDim, PointType, PointVector, PointTypePointer, PointIterator, DistanceIterator > BucketType;
typedef Tree< KDTreePartition<BucketType> > tree; //Kdtree;
//start timing the construction of the kdtree
boost::timer kdtree_construction;
for (ModelPart::NodesContainerType::iterator node_it = rLagrangianModelPart.NodesBegin();
node_it != rLagrangianModelPart.NodesEnd(); ++node_it)
{
PointTypePointer pnode = *(node_it.base());
//put the nodes of the destination model part in an auxiliary list
list_of_nodes.push_back(pnode);
}
std::cout << "kdt constructin time " << kdtree_construction.elapsed() << std::endl;
//create a spatial database with the list of new nodes
unsigned int bucket_size = 20;
tree nodes_tree(list_of_nodes.begin(), list_of_nodes.end(), bucket_size);
//work arrays
Node < 3 > work_point(0, 0.0, 0.0, 0.0);
unsigned int MaximumNumberOfResults = 10000;
PointVector Results(MaximumNumberOfResults);
DistanceVector SquaredResultsDistances(MaximumNumberOfResults);
if (rEulerianModelPart.NodesBegin()->SolutionStepsDataHas(NODAL_H) == false)
KRATOS_ERROR<<"Add ----NODAL_H---- variable!!!!!! ERROR";
double sigma = 0.0;
if (TDim == 2)
sigma = 10.0 / (7.0 * 3.1415926);
else
sigma = 1.0 / 3.1415926;
for (ModelPart::NodesContainerType::iterator node_it = rEulerianModelPart.NodesBegin(); node_it != rEulerianModelPart.NodesEnd(); node_it++)
{
if((node_it)->FastGetSolutionStepValue(IS_INTERFACE)==1)
{ //IS_FREE_SURFACE
work_point.X() = node_it->X();
work_point.Y() = node_it->Y();
work_point.Z() = node_it->Z();
//KRATOS_ERROR(std::logic_error, "Add ----NODAL_H---- variable!!!!!! ERROR", "");
double radius = 2.0 * node_it->FastGetSolutionStepValue(NODAL_H);
//find all of the new nodes within the radius
int number_of_points_in_radius;
//check which of the new nodes lie inside the radius of the circumscribed circle
number_of_points_in_radius = nodes_tree.SearchInRadius(work_point, radius, Results.begin(), SquaredResultsDistances.begin(), MaximumNumberOfResults);
if (number_of_points_in_radius > 0)
{
//double& temperature = (node_it)->FastGetSolutionStepValue(TEMPERATURE);
double temperature_aux = 0.0;
double tot_weight = 0.0;
for (int k = 0; k < number_of_points_in_radius; k++)
{
double distance = sqrt(*(SquaredResultsDistances.begin() + k));
double weight = SPHCubicKernel(sigma, distance, radius);
PointIterator it_found = Results.begin() + k;
//if((*it_found)->FastGetSolutionStepValue(IS_BOUNDARY)>0.5) //MATERIAL_VARIABLE
if((*it_found)->FastGetSolutionStepValue(IS_FREE_SURFACE) ==1 or (*it_found)->FastGetSolutionStepValue(IS_WATER) ==1 ) //MATERIAL_VARIABLE
{
double tempp=0.0;
tempp=(*it_found)->FastGetSolutionStepValue(YCH4);
//KRATOS_ERROR(std::logic_error, "node without temperature", "");
if(tempp<298.0) tempp=298.0;
//else tempp=(*it_found)->FastGetSolutionStepValue(YCH4);
temperature_aux += weight * tempp;//temperature
tot_weight += weight;
//KRATOS_ERROR(std::logic_error, "Add ----NODAL_H---- variable!!!!!! ERROR", "");
}
}
if(tot_weight>0.0)
{
temperature_aux /= tot_weight;
(node_it)->FastGetSolutionStepValue(FUEL)=temperature_aux;
}
else
{
KRATOS_WATCH(tot_weight);
KRATOS_WATCH((node_it)->X());
KRATOS_WATCH((node_it)->Y());
if((node_it)->FastGetSolutionStepValue(TEMPERATURE)<298.0) (node_it)->FastGetSolutionStepValue(FUEL)=298.0;
else (node_it)->FastGetSolutionStepValue(FUEL)=(node_it)->FastGetSolutionStepValue(TEMPERATURE);
}
}
}
else
{
//(node_it)->FastGetSolutionStepValue(FUEL)=(node_it)->FastGetSolutionStepValue(TEMPERATURE);
}
}
KRATOS_CATCH("")
}
void TransferToEulerianMeshShapeBased_aux(ModelPart& rEulerianModelPart, ModelPart & rLagrangianModelPart, BinBasedFastPointLocator<TDim>& node_locator)
{
KRATOS_TRY
Vector N;
const int max_results = 1000;
typename BinBasedFastPointLocator<TDim>::ResultContainerType results(max_results);
const int nparticles = rLagrangianModelPart.Nodes().size();
#pragma omp parallel for firstprivate(results,N)
for (int i = 0; i < nparticles; i++)
{
ModelPart::NodesContainerType::iterator iparticle = rLagrangianModelPart.NodesBegin() + i;
Node < 3 > ::Pointer pparticle = *(iparticle.base());
typename BinBasedFastPointLocator<TDim>::ResultIteratorType result_begin = results.begin();
Element::Pointer pelement;
bool is_found = node_locator.FindPointOnMesh(pparticle->Coordinates(), N, pelement, result_begin, max_results);
if (is_found == true)
{
Geometry<Node<3> >& geom = pelement->GetGeometry();
BoundedMatrix<double, 3, 2 > msDN_DX;
array_1d<double, 3 > N;
//array_1d<double, 3 > N;
double Area=0.0;
GeometryUtils::CalculateGeometryData(geom, msDN_DX, N, Area);
int s0=0;
int s1=0;
int s2=0;
int sum=0;
if(geom[0].FastGetSolutionStepValue(IS_INTERFACE)>0.5) s0=1; //IS_INTERFACE
if(geom[1].FastGetSolutionStepValue(IS_INTERFACE)>0.5) s1=1;
if(geom[2].FastGetSolutionStepValue(IS_INTERFACE)>0.5) s2=1;
sum=s0 + s1 + s2;
array_1d<double, 2 > qrad=ZeroVector(2);
array_1d<double, 2 > qrad_P1=ZeroVector(2);
array_1d<double,2> interface_segment=ZeroVector(2);
array_1d<double,2> normaledge1=ZeroVector(2);
for (unsigned int jj = 0; jj < 2; jj++)
{
for (unsigned int kk = 0; kk < 3; kk++)
{
qrad[jj] += msDN_DX(kk, jj) * geom[kk].FastGetSolutionStepValue(TEMPERATURE);
qrad_P1[jj] += msDN_DX(kk, jj) * geom[kk].FastGetSolutionStepValue(INCIDENT_RADIATION_FUNCTION);
}
}
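// qrad now holds the (element-wise constant) gradient of TEMPERATURE and
// qrad_P1 the gradient of INCIDENT_RADIATION_FUNCTION over the linear
// triangle; the 0.0131 factor used below is a hard-coded material
// coefficient (presumably a conductivity -- it is not documented in the
// original)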
double faceheatflux=0.0;
//double faceheatflux_P1=0.0;
if(sum==2)
{
if((geom[1].FastGetSolutionStepValue(IS_INTERFACE)>0.5 && geom[0].FastGetSolutionStepValue(IS_INTERFACE)>0.5)) //IS_INTERFACE
{
double norm=0.0;
//KRATOS_ERROR(std::logic_error, "element with zero vol found", "");
interface_segment[0] = (geom[0].X()-geom[1].X());
interface_segment[1] = (geom[0].Y()-geom[1].Y());
norm = sqrt( pow((interface_segment[0]),2) + pow((interface_segment[1]),2));
//double area1=norm;
normaledge1(0)= -interface_segment[1]/norm;
normaledge1(1)= interface_segment[0]/norm;
faceheatflux += abs(1.0*(qrad[0]*normaledge1(0)+qrad[1]*normaledge1(1))*0.0131);
}
if((geom[1].FastGetSolutionStepValue(IS_INTERFACE)>0.5 && geom[2].FastGetSolutionStepValue(IS_INTERFACE)>0.5))
{
double norm=0.0;
interface_segment[0] = (geom[1].X()-geom[2].X());
interface_segment[1] = (geom[1].Y()-geom[2].Y());
norm = sqrt( pow((interface_segment[0]),2) + pow((interface_segment[1]),2));
//double area1=norm;
normaledge1(0)= -interface_segment[1]/norm;
normaledge1(1)= interface_segment[0]/norm;
faceheatflux += abs(1.0*(qrad[0]*normaledge1(0)+qrad[1]*normaledge1(1))*0.0131);
}
if((geom[2].FastGetSolutionStepValue(IS_INTERFACE)>0.5 && geom[0].FastGetSolutionStepValue(IS_INTERFACE)>0.5))
{
double norm=0.0;
interface_segment[0] = (geom[2].X()-geom[0].X());
interface_segment[1] = (geom[2].Y()-geom[0].Y());
norm = sqrt( pow((interface_segment[0]),2) + pow((interface_segment[1]),2));
normaledge1(0)= -interface_segment[1]/norm;
normaledge1(1)= interface_segment[0]/norm;
faceheatflux += abs(1.0*(qrad[0]*normaledge1(0)+qrad[1]*normaledge1(1))*0.0131);
}
}
if(sum==1)
{
if((geom[1].FastGetSolutionStepValue(IS_INTERFACE)<0.5 && geom[0].FastGetSolutionStepValue(IS_INTERFACE)<0.5)) //IS_INTERFACE
{
double norm=0.0;
interface_segment[0] = (geom[0].X()-geom[1].X());
interface_segment[1] = (geom[0].Y()-geom[1].Y());
norm = sqrt( pow((interface_segment[0]),2) + pow((interface_segment[1]),2));
normaledge1(0)= -interface_segment[1]/norm;
normaledge1(1)= interface_segment[0]/norm;
faceheatflux += abs(1.0*(qrad[0]*normaledge1(0)+qrad[1]*normaledge1(1))*0.0131);
}
if((geom[1].FastGetSolutionStepValue(IS_INTERFACE)<0.5 && geom[2].FastGetSolutionStepValue(IS_INTERFACE)<0.5))
{
double norm=0.0;
interface_segment[0] = (geom[1].X()-geom[2].X());
interface_segment[1] = (geom[1].Y()-geom[2].Y());
norm = sqrt( pow((interface_segment[0]),2) + pow((interface_segment[1]),2));
normaledge1(0)= -interface_segment[1]/norm;
normaledge1(1)= interface_segment[0]/norm;
faceheatflux += abs(1.0*(qrad[0]*normaledge1(0)+qrad[1]*normaledge1(1))*0.0131);
}
if((geom[2].FastGetSolutionStepValue(IS_INTERFACE)<0.5 && geom[0].FastGetSolutionStepValue(IS_INTERFACE)<0.5))
{
double norm=0.0;
interface_segment[0] = (geom[2].X()-geom[0].X());
interface_segment[1] = (geom[2].Y()-geom[0].Y());
norm = sqrt( pow((interface_segment[0]),2) + pow((interface_segment[1]),2));
normaledge1(0)= -interface_segment[1]/norm;
normaledge1(1)= interface_segment[0]/norm;
faceheatflux += abs(1.0*(qrad[0]*normaledge1(0)+qrad[1]*normaledge1(1))*0.0131);
}
}
(iparticle)->FastGetSolutionStepValue(FACE_HEAT_FLUX)+=(faceheatflux /*+ fhf+ faceheatflux_P1*/);
}
}
KRATOS_CATCH("")
}
///3D
void CalculateNormal(ModelPart& full_model_part)
{
KRATOS_TRY
//resetting the normals
array_1d<double,3> zero;
noalias(zero) = ZeroVector(3);
for(ModelPart::NodesContainerType::const_iterator in = full_model_part.NodesBegin(); in!=full_model_part.NodesEnd(); in++)
{
in->FastGetSolutionStepValue(NORMAL) = zero;
}
array_1d<double,3> v1;
array_1d<double,3> v2;
//array_1d<double,3>& An =zero;
//double area_normal=0.0;
array_1d<double,3> area_normal;
for(ModelPart::ElementsContainerType::iterator iii = full_model_part.ElementsBegin(); iii != full_model_part.ElementsEnd(); iii++)
{
if( iii->GetGeometry()[1].FastGetSolutionStepValue(IS_BOUNDARY) == 1.0 && iii->GetGeometry()[2].FastGetSolutionStepValue(IS_BOUNDARY) == 1.0 && iii->GetGeometry()[3].FastGetSolutionStepValue(IS_BOUNDARY) == 1.0)
{
v1[0] = iii->GetGeometry()[1].X() -iii->GetGeometry()[3].X();
v1[1] = iii->GetGeometry()[1].Y() - iii->GetGeometry()[3].Y();
v1[2] = iii->GetGeometry()[1].Z() - iii->GetGeometry()[3].Z();
v2[0] = iii->GetGeometry()[2].X() - iii->GetGeometry()[3].X();
v2[1] = iii->GetGeometry()[2].Y() - iii->GetGeometry()[3].Y();
v2[2] = iii->GetGeometry()[2].Z() - iii->GetGeometry()[3].Z();
MathUtils<double>::CrossProduct(area_normal,v1,v2);
//area_normal *= -0.5;
array_1d<double,3> msAuxVec = ZeroVector(3);
double c0 = abs(area_normal[0]);
double c1 = abs(area_normal[1]);
double c2 = abs(area_normal[2]);
msAuxVec[0]=c0;
msAuxVec[1]=c1;
msAuxVec[2]=c2;
// double norm_c =norm_2(msAuxVec);
double norm_u = msAuxVec[0]*msAuxVec[0] + msAuxVec[1]*msAuxVec[1] + msAuxVec[2]*msAuxVec[2];
double norm_c =sqrt(norm_u);
iii->GetGeometry()[1].FastGetSolutionStepValue(NORMAL) += area_normal/ norm_c;
iii->GetGeometry()[2].FastGetSolutionStepValue(NORMAL) += area_normal/ norm_c;
iii->GetGeometry()[3].FastGetSolutionStepValue(NORMAL) += area_normal/ norm_c;
}
if( iii->GetGeometry()[0].FastGetSolutionStepValue(IS_BOUNDARY) == 1.0 && iii->GetGeometry()[3].FastGetSolutionStepValue(IS_BOUNDARY) == 1.0 && iii->GetGeometry()[2].FastGetSolutionStepValue(IS_BOUNDARY) == 1.0)
{
v1[0] = iii->GetGeometry()[0].X() -iii->GetGeometry()[2].X();
v1[1] = iii->GetGeometry()[0].Y() - iii->GetGeometry()[2].Y();
v1[2] = iii->GetGeometry()[0].Z() - iii->GetGeometry()[2].Z();
v2[0] = iii->GetGeometry()[3].X() - iii->GetGeometry()[2].X();
v2[1] = iii->GetGeometry()[3].Y() - iii->GetGeometry()[2].Y();
v2[2] = iii->GetGeometry()[3].Z() - iii->GetGeometry()[2].Z();
MathUtils<double>::CrossProduct(area_normal,v1,v2);
//area_normal *= -0.5;
array_1d<double,3> msAuxVec = ZeroVector(3);
double c0 = abs(area_normal[0]);
double c1 = abs(area_normal[1]);
double c2 = abs(area_normal[2]);
msAuxVec[0]=c0;
msAuxVec[1]=c1;
msAuxVec[2]=c2;
//double norm_c =norm_2(msAuxVec);
double norm_u = msAuxVec[0]*msAuxVec[0] + msAuxVec[1]*msAuxVec[1] + msAuxVec[2]*msAuxVec[2];
double norm_c =sqrt(norm_u);
iii->GetGeometry()[0].FastGetSolutionStepValue(NORMAL) += area_normal/ norm_c;
iii->GetGeometry()[3].FastGetSolutionStepValue(NORMAL) += area_normal/ norm_c;
iii->GetGeometry()[2].FastGetSolutionStepValue(NORMAL) += area_normal/ norm_c;
}
if( iii->GetGeometry()[0].FastGetSolutionStepValue(IS_BOUNDARY) == 1.0 && iii->GetGeometry()[1].FastGetSolutionStepValue(IS_BOUNDARY) == 1.0 && iii->GetGeometry()[3].FastGetSolutionStepValue(IS_BOUNDARY) == 1.0)
{
v1[0] = iii->GetGeometry()[0].X() -iii->GetGeometry()[3].X();
v1[1] = iii->GetGeometry()[0].Y() - iii->GetGeometry()[3].Y();
v1[2] = iii->GetGeometry()[0].Z() - iii->GetGeometry()[3].Z();
v2[0] = iii->GetGeometry()[1].X() - iii->GetGeometry()[3].X();
v2[1] = iii->GetGeometry()[1].Y() - iii->GetGeometry()[3].Y();
v2[2] = iii->GetGeometry()[1].Z() - iii->GetGeometry()[3].Z();
MathUtils<double>::CrossProduct(area_normal,v1,v2);
//area_normal *= -0.5;
array_1d<double,3> msAuxVec = ZeroVector(3);
double c0 = abs(area_normal[0]);
double c1 = abs(area_normal[1]);
double c2 = abs(area_normal[2]);
msAuxVec[0]=c0;
msAuxVec[1]=c1;
msAuxVec[2]=c2;
double norm_u = msAuxVec[0]*msAuxVec[0] + msAuxVec[1]*msAuxVec[1] + msAuxVec[2]*msAuxVec[2];
double norm_c =sqrt(norm_u);
iii->GetGeometry()[0].FastGetSolutionStepValue(NORMAL) += area_normal/ norm_c;
iii->GetGeometry()[1].FastGetSolutionStepValue(NORMAL) += area_normal/ norm_c;
iii->GetGeometry()[3].FastGetSolutionStepValue(NORMAL) += area_normal/ norm_c;
}
if( iii->GetGeometry()[0].FastGetSolutionStepValue(IS_BOUNDARY) == 1.0 && iii->GetGeometry()[2].FastGetSolutionStepValue(IS_BOUNDARY) == 1.0 && iii->GetGeometry()[1].FastGetSolutionStepValue(IS_BOUNDARY) == 1.0)
{
v1[0] = iii->GetGeometry()[0].X() -iii->GetGeometry()[1].X();
v1[1] = iii->GetGeometry()[0].Y() - iii->GetGeometry()[1].Y();
v1[2] = iii->GetGeometry()[0].Z() - iii->GetGeometry()[1].Z();
v2[0] = iii->GetGeometry()[2].X() - iii->GetGeometry()[1].X();
v2[1] = iii->GetGeometry()[2].Y() - iii->GetGeometry()[1].Y();
v2[2] = iii->GetGeometry()[2].Z() - iii->GetGeometry()[1].Z();
MathUtils<double>::CrossProduct(area_normal,v1,v2);
//area_normal *= -0.5;
array_1d<double,3> msAuxVec = ZeroVector(3);
double c0 = abs(area_normal[0]);
double c1 = abs(area_normal[1]);
double c2 = abs(area_normal[2]);
msAuxVec[0]=c0;
msAuxVec[1]=c1;
msAuxVec[2]=c2;
// double norm_c =norm_2(msAuxVec);
double norm_u = msAuxVec[0]*msAuxVec[0] + msAuxVec[1]*msAuxVec[1] + msAuxVec[2]*msAuxVec[2];
double norm_c =sqrt(norm_u);
iii->GetGeometry()[0].FastGetSolutionStepValue(NORMAL) += area_normal/ norm_c;
iii->GetGeometry()[2].FastGetSolutionStepValue(NORMAL) += area_normal/ norm_c;
iii->GetGeometry()[1].FastGetSolutionStepValue(NORMAL) += area_normal/ norm_c;
}
}
for(ModelPart::NodesContainerType::iterator iii = full_model_part.NodesBegin(); iii != full_model_part.NodesEnd(); iii++)
{
if(iii->FastGetSolutionStepValue(IS_BOUNDARY)==1.0){
array_1d<double,3>& value_y1 = iii->FastGetSolutionStepValue(NORMAL);
double norm_y1 =norm_2(value_y1);
value_y1 /=(norm_y1 + 1e-9);
}
}
KRATOS_CATCH("")
}
void TransferToEulerianMeshShapeBased_aux_3D(ModelPart& rEulerianModelPart, ModelPart & rLagrangianModelPart, BinBasedFastPointLocator<TDim>& node_locator)
{
KRATOS_TRY
//typedef Node < 3 > PointType;
//typedef Node < 3 > ::Pointer PointTypePointer;
Vector N;
const int max_results = 1000;
typename BinBasedFastPointLocator<TDim>::ResultContainerType results(max_results);
const int nparticles = rLagrangianModelPart.Nodes().size();
#pragma omp parallel for firstprivate(results,N)
for (int i = 0; i < nparticles; i++)
{
ModelPart::NodesContainerType::iterator iparticle = rLagrangianModelPart.NodesBegin() + i;
Node < 3 > ::Pointer pparticle = *(iparticle.base());
typename BinBasedFastPointLocator<TDim>::ResultIteratorType result_begin = results.begin();
Element::Pointer pelement;
bool is_found = node_locator.FindPointOnMesh(pparticle->Coordinates(), N, pelement, result_begin, max_results);
if (is_found == true)
{
Geometry<Node<3> >& geom = pelement->GetGeometry();
BoundedMatrix<double, 4, 3 > msDN_DX;
array_1d<double, 4 > N;
double Area=0.0;
GeometryUtils::CalculateGeometryData(geom, msDN_DX, N, Area);
array_1d<double, 3 > qrad=ZeroVector(3);
double temmp=0.0;
for (unsigned int jj = 0; jj < 3; jj++)
{
for (unsigned int kk = 0; kk < 4; kk++)
{
temmp=geom[kk].FastGetSolutionStepValue(TEMPERATURE);
if(temmp<298.0) temmp=298.0;
qrad[jj] += msDN_DX(kk, jj) * temmp;//geom[kk].FastGetSolutionStepValue(TEMPERATURE);
}
}
//double faceheatflux=0.0;
(iparticle)->FastGetSolutionStepValue(NORMAL) *=(-1.0);
(iparticle)->FastGetSolutionStepValue(FACE_HEAT_FLUX) += abs( (iparticle)->FastGetSolutionStepValue(NORMAL_X) * qrad[0] + (iparticle)->FastGetSolutionStepValue(NORMAL_Y) * qrad[1] + (iparticle)->FastGetSolutionStepValue(NORMAL_Z) * qrad[2]) *0.0131;
}
}
KRATOS_CATCH("")
}
//restarting the step from the beginning
void RestartStep(ModelPart & rModelPart)
{
KRATOS_TRY;
//setting the variables to their value at the beginning of the time step
rModelPart.OverwriteSolutionStepData(1, 0);
//setting the coordinates to their value at the beginning of the step
for (ModelPart::NodesContainerType::iterator node_it = rModelPart.NodesBegin();node_it != rModelPart.NodesEnd(); node_it++)
{
array_1d<double, 3 > & coords = node_it->Coordinates();
const array_1d<double, 3 > & old_disp = node_it->FastGetSolutionStepValue(DISPLACEMENT, 1);
coords[0] = node_it->X0() + old_disp[0];
coords[1] = node_it->Y0() + old_disp[1];
coords[2] = node_it->Z0() + old_disp[2];
}
KRATOS_CATCH("");
}
void MoveMesh_Streamlines_freesurfaceflows(ModelPart& rModelPart, unsigned int substeps)
{
const double dt = rModelPart.GetProcessInfo()[DELTA_TIME];
//KRATOS_ERROR(std::logic_error, "element with zero vol found", "");
BinBasedFastPointLocator<TDim> SearchStructure(rModelPart);
SearchStructure.UpdateSearchDatabase();
//do movement
array_1d<double, 3 > veulerian;
//double temperature=0.0;
array_1d<double, 3 > acc_particle;
Vector N;
const int max_results = 10000;
typename BinBasedFastPointLocator<TDim>::ResultContainerType results(max_results);
const int nparticles = rModelPart.Nodes().size();
#pragma omp parallel for firstprivate(results,N,veulerian,acc_particle)
for (int i = 0; i < nparticles; i++)
{
//int substep = 0;
int subdivisions = 5;
//double temperature=0.0;
ModelPart::NodesContainerType::iterator iparticle = rModelPart.NodesBegin() + i;
Node < 3 > ::Pointer pparticle = *(iparticle.base());
//small_dt = dt / subdivisions;
bool do_move = true;
bool first_time=false;
iparticle->FastGetSolutionStepValue(DISTANCE)=0.0;
iparticle->FastGetSolutionStepValue(EMBEDDED_VELOCITY) = iparticle->FastGetSolutionStepValue(VELOCITY,1); //AUX_VEL
if(iparticle->Is(SLIP)) do_move = false;
//iparticle->FastGetSolutionStepValue(TEMPERATURE) = 298.0;
if( do_move == true ) //note that we suppose the velocity components to be all fixed
{
array_1d<double,3> old_position = pparticle->Coordinates();
array_1d<double,3> current_position = pparticle->Coordinates();
noalias(iparticle->GetInitialPosition()) = old_position;
iparticle->FastGetSolutionStepValue(DISPLACEMENT,1) = ZeroVector(3);
//array_1d<double, 3 > & vel_particle = iparticle->FastGetSolutionStepValue(VELOCITY);
//subdivisions=10;
const double small_dt = dt / subdivisions;
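// substepped streamline integration: each particle is advected with
// "subdivisions" explicit Euler steps of size small_dt along the velocity
// field interpolated at its current position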
//
for (int substep = 0; substep < subdivisions; substep++)
{
typename BinBasedFastPointLocator<TDim>::ResultIteratorType result_begin = results.begin();
Element::Pointer pelement;
bool is_found = SearchStructure.FindPointOnMesh(current_position, N, pelement, result_begin, max_results);
iparticle->Set(TO_ERASE, true);
//(iparticle)->GetValue(ERASE_FLAG) = true;
//KRATOS_WATCH(is_found);
if (is_found == true)
{
Geometry< Node < 3 > >& geom = pelement->GetGeometry();
//int nn=0;
noalias(veulerian) = ZeroVector(3); //0.0;//N[0] * geom[0].FastGetSolutionStepValue(VELOCITY,1);
//temperature=0.0;//N[0] * geom[0].FastGetSolutionStepValue(TEMPERATURE);
for (unsigned int k = 0; k < geom.size(); k++)
{
noalias(veulerian) += N[k] * geom[k].FastGetSolutionStepValue(VELOCITY,1);
}
/*if(iparticle->FastGetSolutionStepValue(IS_LAGRANGIAN_INLET)==1)
{
veulerian(0)*=0.0;
veulerian(2)*=0.0;
}*/
first_time=true;
noalias(current_position) += small_dt*veulerian;
pparticle->Set(TO_ERASE, false);
iparticle->FastGetSolutionStepValue(DISTANCE) += small_dt;
iparticle->FastGetSolutionStepValue(EMBEDDED_VELOCITY)=veulerian;
}
else
{
double time1=iparticle->FastGetSolutionStepValue(DISTANCE);
array_1d<double,3> acc;
acc[0] = 0.0;
acc[1] = -10.0;
acc[2] = 0.0;
if( first_time == false /*&& iparticle->Is(SLIP) == false*/ )
{
noalias(current_position) += small_dt *iparticle->FastGetSolutionStepValue(EMBEDDED_VELOCITY);
//noalias(current_position) += small_dt * small_dt * acc;
pparticle->Set(TO_ERASE, false);
}
else
{
time1 -=small_dt;
//double tiempo_restante=dt-time1;
noalias(current_position) += small_dt *iparticle->FastGetSolutionStepValue(EMBEDDED_VELOCITY);
//noalias(current_position) += small_dt * small_dt * acc;
pparticle->Set(TO_ERASE, false);
}
}
}//for
//update the displacement BUT DO NOT OVERWRITE THE POSITION!!
iparticle->FastGetSolutionStepValue(DISPLACEMENT) = current_position - iparticle->GetInitialPosition();
//KRATOS_WATCH(iparticle->FastGetSolutionStepValue(DISPLACEMENT));
}//move
}
//compute mesh velocity
for(ModelPart::NodesContainerType::iterator it = rModelPart.NodesBegin(); it!=rModelPart.NodesEnd(); it++)
{
//array_1d<double,3>& dn = it->FastGetSolutionStepValue(DISPLACEMENT,1);
array_1d<double,3>& dn1 = it->FastGetSolutionStepValue(DISPLACEMENT);
noalias(it->Coordinates()) = it->GetInitialPosition();
noalias(it->Coordinates()) += dn1;
}
}
void MoveLonelyNodes(ModelPart& ThisModelPart)
{
KRATOS_TRY;
double Dt = ThisModelPart.GetProcessInfo()[DELTA_TIME];
array_1d<double,3> DeltaDisp, acc;
for(ModelPart::NodeIterator i = ThisModelPart.NodesBegin() ;
i != ThisModelPart.NodesEnd() ; ++i)
{
if(
(i)->Is(SLIP) == false &&
(i)->GetValue(NEIGHBOUR_ELEMENTS).size() == 0 &&
((i)->GetDof(VELOCITY_X).IsFixed() == false || (i)->GetDof(VELOCITY_Y).IsFixed() == false || (i)->GetDof(VELOCITY_Z).IsFixed() == false)
)
{
//i->Set(TO_ERASE,true);
//set to zero the pressure
(i)->FastGetSolutionStepValue(PRESSURE) = 0;
const array_1d<double,3>& old_vel = (i)->FastGetSolutionStepValue(VELOCITY,1);
array_1d<double,3>& vel = (i)->FastGetSolutionStepValue(VELOCITY);
//array_1d<double,3>& acc = (i)->FastGetSolutionStepValue(ACCELERATION);
noalias(acc) = (i)->FastGetSolutionStepValue(BODY_FORCE);
acc[0]= 0.0;
acc[1]= -10.0;
acc[2]= 0.0;
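// note: the hard-coded gravity (0,-10,0) above overwrites the BODY_FORCE
// value just read into acc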
noalias(vel) = old_vel;
noalias(vel) += Dt * acc ;
//calculate displacements
//noalias(DeltaDisp) = Dt * vel;
//array_1d<double,3>& disp = i->FastGetSolutionStepValue(DISPLACEMENT);
//noalias(disp) = i->FastGetSolutionStepValue(DISPLACEMENT,1);
//noalias(disp) += DeltaDisp;
noalias(i->Coordinates()) += Dt * Dt * acc;
}
}
KRATOS_CATCH("")
}
void MarkExcessivelyCloseNodes(ModelPart::NodesContainerType& rNodes)
{
KRATOS_TRY;
KRATOS_WATCH("ENTERD Mark close nodes")
//double fact2 = admissible_distance_factor*admissible_distance_factor;
for(ModelPart::NodesContainerType::iterator in = rNodes.begin(); in!=rNodes.end(); in++)
{
if(in->FastGetSolutionStepValue(IS_LAGRANGIAN_INLET) ==1) //if it is not a wall node i can erase
{
int nf=0;
//loop on neighbours and erase if they are too close
for( GlobalPointersVector< Node<3> >::iterator i = in->GetValue(NEIGHBOUR_NODES).begin(); i != in->GetValue(NEIGHBOUR_NODES).end(); i++)
{
//KRATOS_ERROR(std::logic_error, "element with zero vol found", "");
if( /*i->FastGetSolutionStepValue(IS_LAGRANGIAN_INLET) ==1 and*/ i->FastGetSolutionStepValue(IS_FREE_SURFACE) ==1) //we can erase the current node only if the neighb is not to be erased
{
//KRATOS_ERROR(std::logic_error, "element with zero vol found", "");
nf++;
//KRATOS_WATCH(nf)
}
if(nf>=2) {in->FastGetSolutionStepValue(IS_WATER)= 1;
//KRATOS_ERROR(std::logic_error, "element with zero vol found", "");
}
}
}
}
KRATOS_CATCH("")
}
void TransferToParticlesAirVelocity(ModelPart& rEulerianModelPart, ModelPart & rLagrangianModelPart, BinBasedFastPointLocator<TDim>& node_locator)
{
KRATOS_TRY
//definitions for spatial search
//typedef Node < 3 > PointType;
//typedef Node < 3 > ::Pointer PointTypePointer;
Vector N;
const int max_results = 1000;
typename BinBasedFastPointLocator<TDim>::ResultContainerType results(max_results);
const int nparticles = rLagrangianModelPart.Nodes().size();
#pragma omp parallel for firstprivate(results,N)
for (int i = 0; i < nparticles; i++)
{
ModelPart::NodesContainerType::iterator iparticle = rLagrangianModelPart.NodesBegin() + i;
Node < 3 > ::Pointer pparticle = *(iparticle.base());
typename BinBasedFastPointLocator<TDim>::ResultIteratorType result_begin = results.begin();
Element::Pointer pelement;
bool is_found = node_locator.FindPointOnMesh(pparticle->Coordinates(), N, pelement, result_begin, max_results);
if (is_found == true)
{
Geometry<Node<3> >& geom = pelement->GetGeometry();
BoundedMatrix<double, 4, 3 > msDN_DX;
array_1d<double, 4 > N;
double Area=0.0;
GeometryUtils::CalculateGeometryData(geom, msDN_DX, N, Area);
array_1d<double, 3 > velocity=ZeroVector(3);
array_1d<double, 3 > temmp=ZeroVector(3);
//double temmp=0.0;
for (unsigned int jj = 0; jj < 3; jj++)
{
temmp=geom[jj].FastGetSolutionStepValue(VELOCITY);
velocity =N(jj) * temmp;
}
//KRATOS_WATCH(qrad);
//double faceheatflux=0.0;
(iparticle)->FastGetSolutionStepValue(ANGULAR_VELOCITY) = velocity;
}
}
KRATOS_CATCH("")
}
double Calculate_Vol(ModelPart & rLagrangianModelPart)
{
KRATOS_TRY
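// Lumped volume computation: after resetting K0, every tetrahedron
// distributes a quarter of its volume to each of its four nodes; the
// return value is the sum over all nodes, i.e. the total mesh volume.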
//definitions for spatial search
//typedef Node < 3 > PointType;
//typedef Node < 3 > ::Pointer PointTypePointer;
//particles
for (ModelPart::NodesContainerType::iterator node_it = rLagrangianModelPart.NodesBegin(); node_it != rLagrangianModelPart.NodesEnd(); node_it++)
{
if( node_it->GetValue(NEIGHBOUR_ELEMENTS).size() != 0) (node_it)->FastGetSolutionStepValue(K0) = 0.0;
//if( node_it->FastGetSolutionStepValue(NODAL_MASS) == 0.0) KRATOS_ERROR(std::logic_error, "element with zero vol found", "");
}
for (ModelPart::ElementsContainerType::iterator el_it = rLagrangianModelPart.ElementsBegin();el_it != rLagrangianModelPart.ElementsEnd(); el_it++)
{
Geometry<Node < 3 > >& geom = el_it->GetGeometry();
double x0 = geom[0].X();
double y0 = geom[0].Y();
double z0 = geom[0].Z();
double x1 = geom[1].X();
double y1 = geom[1].Y();
double z1 = geom[1].Z();
double x2 = geom[2].X();
double y2 = geom[2].Y();
double z2 = geom[2].Z();
double x3 = geom[3].X();
double y3 = geom[3].Y();
double z3 = geom[3].Z();
double area=0.0;
area=CalculateVol(x0, y0, z0, x1, y1, z1, x2, y2, z2, x3, y3, z3);
geom[0].FastGetSolutionStepValue(K0) += area * 0.25;
geom[1].FastGetSolutionStepValue(K0) += area * 0.25;
geom[2].FastGetSolutionStepValue(K0) += area * 0.25;
geom[3].FastGetSolutionStepValue(K0) += area * 0.25;
}
double sum=0.0;
for (ModelPart::NodesContainerType::iterator node_it = rLagrangianModelPart.NodesBegin(); node_it != rLagrangianModelPart.NodesEnd(); node_it++)
{
sum +=(node_it)->FastGetSolutionStepValue(K0) ;
}
return sum;
KRATOS_CATCH("")
}
void DetectAllOilClusters(ModelPart & mp_local_model_part)
{
int mnumber_of_oil_clusters=0;
for (ModelPart::NodesContainerType::iterator inode = mp_local_model_part.NodesBegin(); inode != mp_local_model_part.NodesEnd(); inode++)
{
inode->FastGetSolutionStepValue(DIAMETER) = -1; //OIL_CLUSTER
}
for (ModelPart::ElementsContainerType::iterator ielem = mp_local_model_part.ElementsBegin();ielem != mp_local_model_part.ElementsEnd(); ielem++)
{
Geometry< Node<3> >& geom = ielem->GetGeometry();
if(geom.size()>1)
{
ielem->GetValue(DIAMETER) = -1;
}
}
//first we paint all the nodes connected to the outlet:
int color = 0;
for (ModelPart::NodesContainerType::iterator inode = mp_local_model_part.NodesBegin(); inode != mp_local_model_part.NodesEnd(); inode++)
{
if(inode->IsFixed(POROSITY) && inode->FastGetSolutionStepValue(DIAMETER)!=0) //nodes connected to the outlet are flagged as cluster zero. // if(inode->IsFixed(CONNECTED_TO_OUTLET) && inode->FastGetSolutionStepValue(OIL_CLUSTER)!=0)
{
ColorOilClusters(inode, 0);
}
}
//having painted those nodes, we proceed with the rest of the colours
for (ModelPart::NodesContainerType::iterator inode = mp_local_model_part.NodesBegin(); inode != mp_local_model_part.NodesEnd(); inode++)
{
if(inode->FastGetSolutionStepValue(DIAMETER) < 0 )
{
color++;
ColorOilClusters(inode, color);
}
}
for (ModelPart::ElementsContainerType::iterator ielem = mp_local_model_part.ElementsBegin(); ielem != mp_local_model_part.ElementsEnd(); ielem++)
{
Geometry< Node<3> >& geom = ielem->GetGeometry();
if(geom.size()>1 && ielem->GetValue(DIAMETER) < 0 )
{
color++;
ielem->GetValue(DIAMETER) = color;
}
}
//finally we flag the nodes with cluster=0 as connected to outlet
for (ModelPart::NodesContainerType::iterator inode = mp_local_model_part.NodesBegin(); inode != mp_local_model_part.NodesEnd(); inode++)
{
if(inode->FastGetSolutionStepValue(DIAMETER) == 0)
inode->FastGetSolutionStepValue(POROSITY)=1.0;
}
mnumber_of_oil_clusters = color;
double area=0.0;
array_1d<double, 3 > velocity_a=ZeroVector(3);
array_1d<double, 3 > velocity_p=ZeroVector(3);
array_1d<double, 3 > temmp=ZeroVector(3);
array_1d<double, 3 > drag_coefficient=ZeroVector(3);
//KRATOS_WATCH(mnumber_of_oil_clusters);
int zz= mnumber_of_oil_clusters + 1;
for(int jj=0; jj< zz; jj++ )
{
if(jj!=0) // cluster 0 is the outlet-connected region and is skipped
{
area=0.0;
velocity_a=ZeroVector(3);
velocity_p=ZeroVector(3);
drag_coefficient=ZeroVector(3);
int nn=0;
for (ModelPart::NodesContainerType::iterator inode = mp_local_model_part.NodesBegin(); inode != mp_local_model_part.NodesEnd(); inode++)
{
int colour_p = (inode)->FastGetSolutionStepValue(DIAMETER);
if(colour_p==jj)
{
area += (inode)->FastGetSolutionStepValue(K0);
velocity_a += (inode)->FastGetSolutionStepValue(ANGULAR_VELOCITY);
velocity_p += (inode)->FastGetSolutionStepValue(VELOCITY);
nn++;
}
}
velocity_a *=(1.0/nn);
velocity_p *=(1.0/nn);
//KRATOS_WATCH("AREA_ANTES");
//KRATOS_WATCH("area");
ComputedDragCoefficient(area, velocity_a, velocity_p, drag_coefficient );
for (ModelPart::NodesContainerType::iterator inode = mp_local_model_part.NodesBegin(); inode != mp_local_model_part.NodesEnd(); inode++)
{
int colour_p = (inode)->FastGetSolutionStepValue(DIAMETER);
if(colour_p==jj)
{
inode->FastGetSolutionStepValue(DRAG_FORCE_X)=drag_coefficient(0);
inode->FastGetSolutionStepValue(DRAG_FORCE_Y)=drag_coefficient(1);
inode->FastGetSolutionStepValue(DRAG_FORCE_Z)=drag_coefficient(2);
}
}
}
}
}
void ComputedDragCoefficient(double nodal_mass, array_1d<double, 3> velocity_air, array_1d<double, 3> velocity_polymer, array_1d<double, 3> & drag_coefficient )
{
KRATOS_TRY
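// Model sketch: the nodal "mass" is treated as a volume, an equivalent
// sphere radius is derived from it, the Reynolds number uses a
// hard-coded kinematic viscosity of 1e-5, and the returned vector is
// the drag force per unit mass, 0.5 * rho * A * Cd * |v_rel| * v_rel / m,
// with the fluid density hard-coded to 1.0.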
double drag_coeff=0.0;
//array_1d<double, 3> drag_coefficient=ZeroVector(3);
//nodal_mass=0.0;
array_1d<double, 3> vrelative;
//nodal_mass=(node_it)->FastGetSolutionStepValue(NODAL_MASS);
double aux=nodal_mass * 3.0/(3.0*3.1416);
double Radius= pow(aux, 0.3333333);
double area=4.0 * 3.1416 * Radius * Radius;
noalias(vrelative)=velocity_air-velocity_polymer;
double norm_u = norm_2(vrelative);
double reynolds = 2 * Radius * norm_u / 0.00001; // 2 * mRadius * mNormOfSlipVel / mKinematicViscosity
if (reynolds < 0.01)
{
reynolds = 0.01;
}
CalculateNewtonianDragCoefficient(reynolds, drag_coeff);
noalias(drag_coefficient) = 0.5 * 1.0 * area * drag_coeff * norm_u* vrelative * (1.0 / nodal_mass); //drag_coeff = 0.5 * mFluidDensity * area * drag_coeff * mNormOfSlipVel;
KRATOS_CATCH("")
}
void CalculateNewtonianDragCoefficient(const double reynolds, double& drag_coeff)
{
KRATOS_TRY
if (reynolds < 1){
drag_coeff = 24.0; // Reynolds;
}
else {
if (reynolds > 1000){
drag_coeff = 0.44;
}
else{
drag_coeff = 24.0 / reynolds * (1.0 + 0.15 * pow(reynolds, 0.687));
}
}
KRATOS_CATCH("")
}
void ColorOilClusters(ModelPart::NodesContainerType::iterator iNode, const int color)
{
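// Breadth-first flood fill: paint the seed node, then repeatedly expand
// through NEIGHBOUR_ELEMENTS, giving every still-unpainted node and
// element reachable from the seed the same cluster colour.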
if(iNode->GetSolutionStepValue(DIAMETER) < 0 ) // if(iNode->GetSolutionStepValue(OIL_CLUSTER) < 0 && ( water_fraction<0.99999999999999 || theta>1.57079632679) )
iNode->GetSolutionStepValue(DIAMETER)=color;
ModelPart::NodesContainerType front_nodes;
GlobalPointersVector<Element >& r_neighbour_elements = iNode->GetValue(NEIGHBOUR_ELEMENTS);
for(GlobalPointersVector<Element >::iterator i_neighbour_element = r_neighbour_elements.begin() ; i_neighbour_element != r_neighbour_elements.end() ; i_neighbour_element++)
{
if(i_neighbour_element->GetValue(DIAMETER) < 0 )
{
i_neighbour_element->SetValue(DIAMETER, color);
Element::GeometryType& p_geometry = i_neighbour_element->GetGeometry();
for(unsigned int i = 0; i < p_geometry.size(); i++)
{
if(p_geometry[i].GetSolutionStepValue(DIAMETER) < 0 )
{
p_geometry[i].GetSolutionStepValue(DIAMETER) = color;
front_nodes.push_back(p_geometry(i));
}
}
}
}
while(!front_nodes.empty())
{
ModelPart::NodesContainerType new_front_nodes;
for(ModelPart::NodesContainerType::iterator i_node = front_nodes.begin() ; i_node != front_nodes.end() ; i_node++)
{
GlobalPointersVector<Element >& r_neighbour_elements = i_node->GetValue(NEIGHBOUR_ELEMENTS);
for(GlobalPointersVector<Element >::iterator i_neighbour_element = r_neighbour_elements.begin() ; i_neighbour_element != r_neighbour_elements.end() ; i_neighbour_element++)
{
if(i_neighbour_element->GetValue(DIAMETER) < 0 )
{
i_neighbour_element->SetValue(DIAMETER, color);
Element::GeometryType& p_geometry = i_neighbour_element->GetGeometry();
for(unsigned int i = 0; i < p_geometry.size(); i++)
{
if(p_geometry[i].GetSolutionStepValue(DIAMETER) < 0 )
{
p_geometry[i].GetSolutionStepValue(DIAMETER) = color;
new_front_nodes.push_back(p_geometry(i));
}
}
}
}
}
front_nodes.clear(); // (or resize(0) if clear() is not available)
for( ModelPart::NodesContainerType::iterator i_node = new_front_nodes.begin() ; i_node != new_front_nodes.end() ; i_node++)
front_nodes.push_back(*(i_node.base()));
}
}
void movethermocouples(ModelPart& rEulerianModelPart, ModelPart& rLagrangianModelPart, BinBasedFastPointLocator<TDim>& node_locator)
{
KRATOS_TRY
array_1d<double, 3 > veulerian;
double temperature;
Vector N;
//double G;
const int max_results = 1000;
typename BinBasedFastPointLocator<TDim>::ResultContainerType results(max_results);
double dt =0.01;
const int nparticles = rLagrangianModelPart.Nodes().size();
#pragma omp parallel for firstprivate(results,N,veulerian,temperature)
for (int i = 0; i < nparticles; i++)
{
ModelPart::NodesContainerType::iterator iparticle = rLagrangianModelPart.NodesBegin() + i;
const int subdivisions = 5;
const double small_dt = dt / subdivisions;
for (int substep = 0; substep < subdivisions; substep++)
{
Node < 3 > ::Pointer pparticle = *(iparticle.base());
typename BinBasedFastPointLocator<TDim>::ResultIteratorType result_begin = results.begin();
Element::Pointer pelement;
bool is_found = node_locator.FindPointOnMesh(pparticle->Coordinates(), N, pelement, result_begin, max_results);
if (is_found == true)
{
Geometry< Node < 3 > >& geom = pelement->GetGeometry();
//move according to the streamline
noalias(veulerian) = N[0] * geom[0].FastGetSolutionStepValue(VELOCITY, 1);
temperature = N[0] * geom[0].FastGetSolutionStepValue(YCH4);
for (unsigned int k = 1; k < geom.size(); k++)
{
noalias(veulerian) += N[k] * geom[k].FastGetSolutionStepValue(VELOCITY, 1);
temperature += N[k] * geom[k].FastGetSolutionStepValue(YCH4);
//KRATOS_WATCH(geom[k].FastGetSolutionStepValue(YCH4));
}
double & temp = (iparticle)->FastGetSolutionStepValue(YCH4);
temp =temperature;
veulerian(0) *=0.0;
veulerian(2) *=0.0;
array_1d<double, 3 > & disp = (iparticle)->FastGetSolutionStepValue(DISPLACEMENT);
noalias(disp) += small_dt*veulerian;
noalias(iparticle->Coordinates()) = iparticle->GetInitialPosition();
noalias(iparticle->Coordinates()) += iparticle->FastGetSolutionStepValue(DISPLACEMENT);
}
}
}
KRATOS_CATCH("")
}
void TransferToEulerianMesh_2(ModelPart& rEulerianModelPart, ModelPart & rLagrangianModelPart)
{
KRATOS_TRY
//definitions for spatial search
typedef Node < 3 > PointType;
typedef Node < 3 > ::Pointer PointTypePointer;
typedef std::vector<PointType::Pointer> PointVector;
typedef std::vector<PointType::Pointer>::iterator PointIterator;
typedef std::vector<double> DistanceVector;
typedef std::vector<double>::iterator DistanceIterator;
//creating an auxiliary list for the new nodes
PointVector list_of_nodes;
//*************
// Bucket types
typedef Bucket< TDim, PointType, PointVector, PointTypePointer, PointIterator, DistanceIterator > BucketType;
typedef Tree< KDTreePartition<BucketType> > tree; //Kdtree;
//start timing the construction of the kd-tree
boost::timer kdtree_construction;
for (ModelPart::NodesContainerType::iterator node_it = rLagrangianModelPart.NodesBegin();
node_it != rLagrangianModelPart.NodesEnd(); ++node_it)
{
PointTypePointer pnode = *(node_it.base());
//putting the nodes of the destination_model part in an auxiliary list
list_of_nodes.push_back(pnode);
}
std::cout << "kdt constructin time " << kdtree_construction.elapsed() << std::endl;
//create a spatial database with the list of new nodes
unsigned int bucket_size = 20;
tree nodes_tree(list_of_nodes.begin(), list_of_nodes.end(), bucket_size);
//work arrays
Node < 3 > work_point(0, 0.0, 0.0, 0.0);
unsigned int MaximumNumberOfResults = 10000;
PointVector Results(MaximumNumberOfResults);
DistanceVector SquaredResultsDistances(MaximumNumberOfResults);
if (rEulerianModelPart.NodesBegin()->SolutionStepsDataHas(NODAL_H) == false)
KRATOS_ERROR<<"Add ----NODAL_H---- variable!!!!!! ERROR";
double sigma = 0.0;
if (TDim == 2)
sigma = 10.0 / (7.0 * 3.1415926);
else
sigma = 1.0 / 3.1415926;
for (ModelPart::NodesContainerType::iterator node_it = rEulerianModelPart.NodesBegin(); node_it != rEulerianModelPart.NodesEnd(); node_it++)
{
if((node_it)->Y()< -0.15102 )
{
work_point.X() = node_it->X();
work_point.Y() = node_it->Y();
work_point.Z() = node_it->Z();
double radius = 2.0 * node_it->FastGetSolutionStepValue(NODAL_H);
//find all of the new nodes within the radius
int number_of_points_in_radius;
//look among the new nodes for those inside the radius of the circumscribed circle
number_of_points_in_radius = nodes_tree.SearchInRadius(work_point, radius, Results.begin(), SquaredResultsDistances.begin(), MaximumNumberOfResults);
if (number_of_points_in_radius > 0)
{
//double& temperature = (node_it)->FastGetSolutionStepValue(TEMPERATURE);
double temperature_aux = 0.0;
double tot_weight = 0.0;
double C = 1.19e15;
double E_over_R = 24067.0;
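// Arrhenius-type heat source: each qualifying Lagrangian neighbour
// contributes weight * 27400 * C * exp(-E_over_R / T) * 905 (the
// constants appear to be hard-coded model parameters), SPH-smoothed and
// normalised by tot_weight below.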
for (int k = 0; k < number_of_points_in_radius; k++)
{
double distance = sqrt(*(SquaredResultsDistances.begin() + k));
double weight = SPHCubicKernel(sigma, distance, radius);
PointIterator it_found = Results.begin() + k;
if( (*it_found)->FastGetSolutionStepValue(DIAMETER) >0 && (*it_found)->FastGetSolutionStepValue(YN2) ==0.0) //MATERIAL_VARIABLE
{
double tempp=0.0;
tempp=(*it_found)->FastGetSolutionStepValue(YCH4);
//if(tempp<298.0) tempp=298.0;
//else tempp=(*it_found)->FastGetSolutionStepValue(YCH4);
temperature_aux += weight * 27400.0 * 1.0 * C * exp(-E_over_R / tempp ) * 905.0 ;//temperature
tot_weight += weight;
}
}
if(tot_weight>0.0)
{
//(node_it)->FastGetSolutionStepValue(FUEL)=1500.0;
//double nodal_mass = node_it->FastGetSolutionStepValue(NODAL_MASS);
//KRATOS_WATCH(node_it->FastGetSolutionStepValue(NODAL_MASS))
//double& heat = node_it->FastGetSolutionStepValue(HEAT_FLUX);
temperature_aux /= (tot_weight );
//temperature= 1500.0;//temperature_aux;
if(temperature_aux>1e9) (node_it)->FastGetSolutionStepValue(HEAT_FLUX) += 1e9;
else (node_it)->FastGetSolutionStepValue(HEAT_FLUX) += temperature_aux;
}
}
}
}
KRATOS_CATCH("")
}
void TransferToEulerianMeshShapeBased(ModelPart& rEulerianModelPart, ModelPart & rLagrangianModelPart, BinBasedFastPointLocator<TDim>& node_locator)
{
KRATOS_TRY
for (ModelPart::NodesContainerType::iterator node_it = rEulerianModelPart.NodesBegin(); node_it != rEulerianModelPart.NodesEnd(); node_it++)
{
(node_it)->GetValue(POISSON_RATIO) = 0.0;
(node_it)->GetValue(YOUNG_MODULUS) = 0.0;
(node_it)->GetValue(NODAL_MASS) = 0.0;
(node_it)->GetValue(HEAT_FLUX) = 0.0;
}
for (ModelPart::ElementsContainerType::iterator el_it = rEulerianModelPart.ElementsBegin();el_it != rEulerianModelPart.ElementsEnd(); el_it++)
{
el_it->SetValue(YOUNG_MODULUS,0.0);
}
Vector N;
const int max_results = 1000;
typename BinBasedFastPointLocator<TDim>::ResultContainerType results(max_results);
const int nparticles = rLagrangianModelPart.Nodes().size();
double C = 1.19e15;
double E_over_R = 24067.0;
double A=0.0;
#pragma omp parallel for firstprivate(results,N)
for (int i = 0; i < nparticles; i++)
{
ModelPart::NodesContainerType::iterator iparticle = rLagrangianModelPart.NodesBegin() + i;
//KRATOS_ERROR(std::logic_error, "Add ----FORCE---- variable!!!!!! ERROR", "");
Node < 3 > ::Pointer pparticle = *(iparticle.base());
typename BinBasedFastPointLocator<TDim>::ResultIteratorType result_begin = results.begin();
Element::Pointer pelement;
bool is_found = node_locator.FindPointOnMesh(pparticle->Coordinates(), N, pelement, result_begin, max_results);
if (is_found == true)
{
Geometry<Node<3> >& geom = pelement->GetGeometry();
const array_1d<double, 3 > & vel_particle = (iparticle)->FastGetSolutionStepValue(VELOCITY);
double density_particle = (iparticle)->FastGetSolutionStepValue(DENSITY);
double Tp=(iparticle)->FastGetSolutionStepValue(YCH4); //HEAT_FLUX
if(Tp>1000.0) Tp=1000.0;
double temperature = 0.3e+8;//1000000.0;//(iparticle)->FastGetSolutionStepValue(TEMPERATURE);
temperature = 2e+7;
if( (iparticle)->FastGetSolutionStepValue(DIAMETER)>0)// ((iparticle)->GetValue(NEIGHBOUR_ELEMENTS)).size() == 0)
{
(iparticle)->FastGetSolutionStepValue(YN2)=1.0;
// KRATOS_ERROR(std::logic_error, "Add ----FORCE---- variable!!!!!! ERROR", "");
KRATOS_WATCH("aloneeeeeeeeeeeeeeeeeeeee");
KRATOS_WATCH((iparticle)->FastGetSolutionStepValue(DIAMETER));
for (unsigned int k = 0; k < geom.size(); k++)
{
//KRATOS_ERROR(std::logic_error, "Add ----FORCE---- variable!!!!!! ERROR", "");
geom[k].SetLock();
geom[k].GetValue(YOUNG_MODULUS) += N[k] * 27400.0 * 1.0 * C * exp(-E_over_R/(Tp)) *905.0 * (iparticle)->FastGetSolutionStepValue(K0);//0.0001;
KRATOS_WATCH("K0");
KRATOS_WATCH((iparticle)->FastGetSolutionStepValue(K0));
geom[k].GetValue(POISSON_RATIO) += N[k];
geom[k].UnSetLock();
}
}
}
}
for (ModelPart::ElementsContainerType::iterator el_it = rEulerianModelPart.ElementsBegin();el_it != rEulerianModelPart.ElementsEnd(); el_it++)
{
Geometry<Node < 3 > >& geom = el_it->GetGeometry();
double x0 = geom[0].X();
double y0 = geom[0].Y();
double z0 = geom[0].Z();
double x1 = geom[1].X();
double y1 = geom[1].Y();
double z1 = geom[1].Z();
double x2 = geom[2].X();
double y2 = geom[2].Y();
double z2 = geom[2].Z();
double x3 = geom[3].X();
double y3 = geom[3].Y();
double z3 = geom[3].Z();
double area=0.0;
//if(TDim==2) area=CalculateVol(x0, y0, x1, y1, x2, y2);
//else
area=CalculateVol(x0, y0, z0, x1, y1, z1, x2, y2, z2, x3, y3, z3);
geom[0].FastGetSolutionStepValue(NODAL_MASS) += area * 0.25;
geom[1].FastGetSolutionStepValue(NODAL_MASS) += area * 0.25;
geom[2].FastGetSolutionStepValue(NODAL_MASS) += area * 0.25;
geom[3].FastGetSolutionStepValue(NODAL_MASS) += area * 0.25;
}
for (ModelPart::NodesContainerType::iterator node_it = rEulerianModelPart.NodesBegin(); node_it != rEulerianModelPart.NodesEnd(); node_it++)
{
const double NN = (node_it)->GetValue(POISSON_RATIO);
const double tt = (node_it)->GetValue(YOUNG_MODULUS);
if (NN != 0.0)
{
//KRATOS_ERROR(std::logic_error, "element with zero vol found", "");
//KRATOS_WATCH(tt);
//KRATOS_WATCH(NN);
double nodal_mass = node_it->FastGetSolutionStepValue(NODAL_MASS);
double& heat = node_it->FastGetSolutionStepValue(HEAT_FLUX);
double heat_value=tt/NN * (1.0/nodal_mass);
if (heat_value>1e9 /*0.5e+7*/) heat_value = 1e9 /*0.5e+7*/; //0.5e+7;
heat= heat_value;
KRATOS_WATCH(heat);
}
//}
}
// Timer::Stop("Interpolacion");
//KRATOS_WATCH(time)
KRATOS_CATCH("")
}
private:
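// Cubic spline SPH kernel with support radius hmax (h = hmax/2):
// W = sigma/h^TDim * (1 - 1.5 s^2 + 0.75 s^3) for s <= 1,
// 0.25 * sigma/h^TDim * (2 - s)^3 for 1 < s <= 2, and 0 beyond,
// where s = r/h.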
inline double SPHCubicKernel(const double sigma, const double r, const double hmax)
{
double h_half = 0.5 * hmax;
const double s = r / h_half;
const double coeff = sigma / pow(h_half, static_cast<int>(TDim));
if (s <= 1.0)
return coeff * (1.0 - 1.5 * s * s + 0.75 * s * s * s);
else if (s <= 2.0)
return 0.25 * coeff * pow(2.0 - s, 3);
else
return 0.0;
}
inline void CalculateCenterAndSearchRadius(Geometry<Node < 3 > >&geom, double& xc, double& yc, double& zc, double& R, array_1d<double, 3 > & N )
{
double x0 = geom[0].X();
double y0 = geom[0].Y();
double x1 = geom[1].X();
double y1 = geom[1].Y();
double x2 = geom[2].X();
double y2 = geom[2].Y();
xc = 0.3333333333333333333 * (x0 + x1 + x2);
yc = 0.3333333333333333333 * (y0 + y1 + y2);
zc = 0.0;
double R1 = (xc - x0)*(xc - x0) + (yc - y0)*(yc - y0);
double R2 = (xc - x1)*(xc - x1) + (yc - y1)*(yc - y1);
double R3 = (xc - x2)*(xc - x2) + (yc - y2)*(yc - y2);
R = R1;
if (R2 > R) R = R2;
if (R3 > R) R = R3;
R = 1.01 * sqrt(R);
}
inline void CalculateCenterAndSearchRadius(Geometry<Node < 3 > >&geom, double& xc, double& yc, double& zc, double& R, array_1d<double, 4 > & N )
{
double x0 = geom[0].X();
double y0 = geom[0].Y();
double z0 = geom[0].Z();
double x1 = geom[1].X();
double y1 = geom[1].Y();
double z1 = geom[1].Z();
double x2 = geom[2].X();
double y2 = geom[2].Y();
double z2 = geom[2].Z();
double x3 = geom[3].X();
double y3 = geom[3].Y();
double z3 = geom[3].Z();
xc = 0.25 * (x0 + x1 + x2 + x3);
yc = 0.25 * (y0 + y1 + y2 + y3);
zc = 0.25 * (z0 + z1 + z2 + z3);
double R1 = (xc - x0)*(xc - x0) + (yc - y0)*(yc - y0) + (zc - z0)*(zc - z0);
double R2 = (xc - x1)*(xc - x1) + (yc - y1)*(yc - y1) + (zc - z1)*(zc - z1);
double R3 = (xc - x2)*(xc - x2) + (yc - y2)*(yc - y2) + (zc - z2)*(zc - z2);
double R4 = (xc - x3)*(xc - x3) + (yc - y3)*(yc - y3) + (zc - z3)*(zc - z3);
R = R1;
if (R2 > R) R = R2;
if (R3 > R) R = R3;
if (R4 > R) R = R4;
R = sqrt(R);
}
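// Barycentric point-in-tetrahedron test: N[k] are the shape functions
// at (xc,yc,zc), computed as sub-tetrahedron volumes over the total
// volume; the point is inside iff all four lie in [0,1].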
inline bool CalculatePosition(Geometry<Node < 3 > >&geom,const double xc, const double yc, const double zc, array_1d<double, 4 > & N )
{
double x0 = geom[0].X();
double y0 = geom[0].Y();
double z0 = geom[0].Z();
double x1 = geom[1].X();
double y1 = geom[1].Y();
double z1 = geom[1].Z();
double x2 = geom[2].X();
double y2 = geom[2].Y();
double z2 = geom[2].Z();
double x3 = geom[3].X();
double y3 = geom[3].Y();
double z3 = geom[3].Z();
double vol = CalculateVol(x0, y0, z0, x1, y1, z1, x2, y2, z2, x3, y3, z3);
double inv_vol = 0.0;
if (vol < 0.0000000000001)
{
KRATOS_ERROR<<"element with zero vol found";
}
else
{
inv_vol = 1.0 / vol;
}
N[0] = CalculateVol(x1, y1, z1, x3, y3, z3, x2, y2, z2, xc, yc, zc) * inv_vol;
N[1] = CalculateVol(x0, y0, z0, x1, y1, z1, x2, y2, z2, xc, yc, zc) * inv_vol;
N[2] = CalculateVol(x3, y3, z3, x1, y1, z1, x0, y0, z0, xc, yc, zc) * inv_vol;
N[3] = CalculateVol(x3, y3, z3, x0, y0, z0, x2, y2, z2, xc, yc, zc) * inv_vol;
if (N[0] >= 0.0 && N[1] >= 0.0 && N[2] >= 0.0 && N[3] >= 0.0 &&
N[0] <= 1.0 && N[1] <= 1.0 && N[2] <= 1.0 && N[3] <= 1.0)
//if the xc yc zc is inside the tetrahedron return true
return true;
return false;
}
inline double CalculateVol(const double x0, const double y0,
const double x1, const double y1,
const double x2, const double y2
)
{
return 0.5 * ((x1 - x0)*(y2 - y0)- (y1 - y0)*(x2 - x0));
}
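// Signed volume of a tetrahedron: determinant of the three edge vectors
// from node 0, divided by 6.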
inline double CalculateVol(const double x0, const double y0, const double z0, const double x1, const double y1, const double z1, const double x2, const double y2, const double z2,const double x3, const double y3, const double z3 )
{
double x10 = x1 - x0;
double y10 = y1 - y0;
double z10 = z1 - z0;
double x20 = x2 - x0;
double y20 = y2 - y0;
double z20 = z2 - z0;
double x30 = x3 - x0;
double y30 = y3 - y0;
double z30 = z3 - z0;
double detJ = x10 * y20 * z30 - x10 * y30 * z20 + y10 * z20 * x30 - y10 * x20 * z30 + z10 * x20 * y30 - z10 * y20 * x30;
return detJ * 0.1666666666666666666667;
}
void ComputeGaussPointPositions(Geometry< Node < 3 > >& geom, BoundedMatrix<double, 4, 3 > & pos, BoundedMatrix<double, 4, 3 > & N)
{
double one_third = 1.0 / 3.0;
double one_sixt = 1.0 / 6.0;
double two_third = 2.0 * one_third;
N(0, 0) = one_sixt;
N(0, 1) = one_sixt;
N(0, 2) = two_third;
N(1, 0) = two_third;
N(1, 1) = one_sixt;
N(1, 2) = one_sixt;
N(2, 0) = one_sixt;
N(2, 1) = two_third;
N(2, 2) = one_sixt;
N(3, 0) = one_third;
N(3, 1) = one_third;
N(3, 2) = one_third;
//first
pos(0, 0) = one_sixt * geom[0].X() + one_sixt * geom[1].X() + two_third * geom[2].X();
pos(0, 1) = one_sixt * geom[0].Y() + one_sixt * geom[1].Y() + two_third * geom[2].Y();
pos(0, 2) = one_sixt * geom[0].Z() + one_sixt * geom[1].Z() + two_third * geom[2].Z();
//second
pos(1, 0) = two_third * geom[0].X() + one_sixt * geom[1].X() + one_sixt * geom[2].X();
pos(1, 1) = two_third * geom[0].Y() + one_sixt * geom[1].Y() + one_sixt * geom[2].Y();
pos(1, 2) = two_third * geom[0].Z() + one_sixt * geom[1].Z() + one_sixt * geom[2].Z();
//third
pos(2, 0) = one_sixt * geom[0].X() + two_third * geom[1].X() + one_sixt * geom[2].X();
pos(2, 1) = one_sixt * geom[0].Y() + two_third * geom[1].Y() + one_sixt * geom[2].Y();
pos(2, 2) = one_sixt * geom[0].Z() + two_third * geom[1].Z() + one_sixt * geom[2].Z();
//fourth
pos(3, 0) = one_third * geom[0].X() + one_third * geom[1].X() + one_third * geom[2].X();
pos(3, 1) = one_third * geom[0].Y() + one_third * geom[1].Y() + one_third * geom[2].Y();
pos(3, 2) = one_third * geom[0].Z() + one_third * geom[1].Z() + one_third * geom[2].Z();
}
void ComputeGaussPointPositions(Geometry< Node < 3 > >& geom, BoundedMatrix<double, 16, 3 > & pos, BoundedMatrix<double, 16, 3 > & N)
{
//lower diagonal terms
double ypos = 1.0 / 12.0;
int pos_counter = 0;
for (unsigned int i = 0; i < 4; i++)
{
double xpos = 1.0 / 12.0;
for (unsigned int j = 0; j < 4 - i; j++)
{
double N1 = xpos;
double N2 = ypos;
double N3 = 1.0 - xpos - ypos;
pos(pos_counter, 0) = N1 * geom[0].X() + N2 * geom[1].X() + N3 * geom[2].X();
pos(pos_counter, 1) = N1 * geom[0].Y() + N2 * geom[1].Y() + N3 * geom[2].Y();
pos(pos_counter, 2) = N1 * geom[0].Z() + N2 * geom[1].Z() + N3 * geom[2].Z();
N(pos_counter, 0) = N1;
N(pos_counter, 1) = N2;
N(pos_counter, 2) = N3;
xpos += 1.0 / 4.0;
pos_counter += 1;
}
ypos += 1.0 / 4.0;
}
//lower diagonal terms
ypos = 2.0 / 12.0;
// pos_counter = 8;
for (unsigned int i = 0; i < 3; i++)
{
double xpos = 2.0 / 12.0;
for (unsigned int j = 0; j < 4 - i; j++)
{
double N1 = xpos;
double N2 = ypos;
double N3 = 1.0 - xpos - ypos;
pos(pos_counter, 0) = N1 * geom[0].X() + N2 * geom[1].X() + N3 * geom[2].X();
pos(pos_counter, 1) = N1 * geom[0].Y() + N2 * geom[1].Y() + N3 * geom[2].Y();
pos(pos_counter, 2) = N1 * geom[0].Z() + N2 * geom[1].Z() + N3 * geom[2].Z();
N(pos_counter, 0) = N1;
N(pos_counter, 1) = N2;
N(pos_counter, 2) = N3;
xpos += 1.0 / 4.0;
pos_counter += 1;
}
ypos += 1.0 / 4.0;
}
}
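// Consistent mass matrix of a linear triangle of area A:
// M = A/12 * [[2,1,1],[1,2,1],[1,1,2]].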
void ConsistentMassMatrix(const double A, BoundedMatrix<double, 3, 3 > & M)
{
double c1 = A / 12.0;
double c2 = 2.0 * c1;
M(0, 0) = c2;
M(0, 1) = c1;
M(0, 2) = c1;
M(1, 0) = c1;
M(1, 1) = c2;
M(1, 2) = c1;
M(2, 0) = c1;
M(2, 1) = c1;
M(2, 2) = c2;
}
void CalculateInterfaceNormal(BoundedMatrix<double, 3, 2 >& rPoints, array_1d<double,3>& rDistances, array_1d<double,2>& normal, double & interface_area, array_1d<double,3>& Ninterface, BoundedMatrix<double, 2, 2 >& rInterfacePoints)
{
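// Level-set interface reconstruction in a cut triangle: the two points
// where the distance field changes sign are found by linear
// interpolation along the cut edges; the normal is that segment rotated
// 90 degrees, oriented towards the positive-distance side, and
// Ninterface holds the shape functions evaluated at the segment midpoint.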
double sign_correction=1.0;
BoundedMatrix<double, 2, 2 > InterfacePoints;
array_1d<bool,3> cut_edges;
array_1d<double,2> interface_segment=ZeroVector(2);
if ((rDistances(0)*rDistances(1))<0.0) cut_edges[0]=true;//edge 12 is cut
else cut_edges[0]=false;
if ((rDistances(1)*rDistances(2))<0.0) cut_edges[1]=true;//edge 23 is cut.
else cut_edges[1]=false;
if ((rDistances(2)*rDistances(0))<0.0) cut_edges[2]=true;//edge 13 is cut.
else cut_edges[2]=false;
if (cut_edges[0])
{
if (rDistances(0)>0.0) sign_correction=1.0;
else sign_correction=-1.0;
const double relative_position = abs(rDistances(1)/(rDistances(1)-rDistances(0) ) );
InterfacePoints(0,0) = relative_position*rPoints(0,0) + (1.0-relative_position)*rPoints(1,0);
InterfacePoints(0,1) = relative_position*rPoints(0,1) + (1.0-relative_position)*rPoints(1,1);
if (cut_edges[1])
{
const double relative_position2 = abs(rDistances(2)/(rDistances(1)-rDistances(2) ) );
InterfacePoints(1,0) = relative_position2*rPoints(1,0) + (1.0-relative_position2)*rPoints(2,0);
InterfacePoints(1,1) = relative_position2*rPoints(1,1) + (1.0-relative_position2)*rPoints(2,1);
}
else
{
const double relative_position2 = abs(rDistances(0)/(rDistances(2)-rDistances(0) ) );
InterfacePoints(1,0) = relative_position2*rPoints(2,0) + (1.0-relative_position2)*rPoints(0,0);
InterfacePoints(1,1) = relative_position2*rPoints(2,1) + (1.0-relative_position2)*rPoints(0,1);
}
}
else
{
if (rDistances(1)>0.0) sign_correction=1.0;
else sign_correction=-1.0;
const double relative_position = abs(rDistances(2)/(rDistances(2)-rDistances(1) ) );
InterfacePoints(0,0) = relative_position*rPoints(1,0) + (1.0-relative_position)*rPoints(2,0);
InterfacePoints(0,1) = relative_position*rPoints(1,1) + (1.0-relative_position)*rPoints(2,1);
const double relative_position2 = abs(rDistances(0)/(rDistances(2)-rDistances(0) ) );
InterfacePoints(1,0) = relative_position2*rPoints(2,0) + (1.0-relative_position2)*rPoints(0,0);
InterfacePoints(1,1) = relative_position2*rPoints(2,1) + (1.0-relative_position2)*rPoints(0,1);
}
interface_segment[0] = (InterfacePoints(1,0)-InterfacePoints(0,0));
interface_segment[1] = (InterfacePoints(1,1)-InterfacePoints(0,1));
const double norm = sqrt( pow((interface_segment[0]),2) + pow((interface_segment[1]),2));
normal(0)= -interface_segment[1]*sign_correction/norm;
normal(1)= interface_segment[0]*sign_correction/norm;
//KRATOS_WATCH(interface_segment)
//KRATOS_WATCH(InterfacePoints)
interface_area=norm;
rInterfacePoints(0,0)=InterfacePoints(0,0);
rInterfacePoints(0,1)=InterfacePoints(0,1);
rInterfacePoints(1,0)=InterfacePoints(1,0);
rInterfacePoints(1,1)=InterfacePoints(1,1);
const double x_interface = 0.5*(InterfacePoints(0,0)+InterfacePoints(1,0));
const double y_interface = 0.5*(InterfacePoints(0,1)+InterfacePoints(1,1));
// CalculatePosition(rPoints, x_interface, y_interface, 0.0, Ninterface );
///inlined here: CalculatePosition(rPoints, x_interface, y_interface, 0.0, Ninterface );
double x0 = rPoints(0,0);
double y0 = rPoints(0,1);
double x1 = rPoints(1,0);
double y1 = rPoints(1,1);
double x2 = rPoints(2,0);
double y2 = rPoints(2,1);
double area = CalculateVol(x0, y0, x1, y1, x2, y2);
double inv_area = 0.0;
if (area == 0.0)
{
KRATOS_ERROR<<"element with zero area found";
}
else
{
inv_area = 1.0 / area;
}
Ninterface[0]= CalculateVol(x1, y1, x2, y2, x_interface, y_interface) * inv_area;
Ninterface[1] = CalculateVol(x2, y2, x0, y0, x_interface, y_interface) * inv_area;
Ninterface[2] = CalculateVol(x0, y0, x1, y1, x_interface, y_interface) * inv_area;
}
bool CalculatePosition(Geometry<Node < 3 > >&geom,const double xc, const double yc, const double zc,array_1d<double, 3 > & N )
{
double x0 = geom[0].X();
double y0 = geom[0].Y();
double x1 = geom[1].X();
double y1 = geom[1].Y();
double x2 = geom[2].X();
double y2 = geom[2].Y();
double area = CalculateVol(x0, y0, x1, y1, x2, y2);
double inv_area = 0.0;
if (area == 0.0)
{
KRATOS_ERROR<<"element with zero area found";
}
else
{
inv_area = 1.0 / area;
}
N[0] = CalculateVol(x1, y1, x2, y2, xc, yc) * inv_area;
N[1] = CalculateVol(x2, y2, x0, y0, xc, yc) * inv_area;
N[2] = CalculateVol(x0, y0, x1, y1, xc, yc) * inv_area;
if (N[0] >= 0.0 && N[1] >= 0.0 && N[2] >= 0.0 && N[0] <= 1.0 && N[1] <= 1.0 && N[2] <= 1.0) //if the xc yc is inside the triangle return true
return true;
return false;
}
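// Dense matrix inverse via boost::numeric::ublas: LU-factorize a copy
// of the input with partial pivoting, then back-substitute against the
// identity. Returns false if the matrix is singular.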
template<class T>
bool InvertMatrix(const T& input, T& inverse)
{
typedef permutation_matrix<std::size_t> pmatrix;
// create a working copy of the input
T A(input);
// create a permutation matrix for the LU-factorization
pmatrix pm(A.size1());
// perform LU-factorization
int res = lu_factorize(A, pm);
if (res != 0)
return false;
// create identity matrix of "inverse"
inverse.assign(identity_matrix<double> (A.size1()));
// backsubstitute to get the inverse
lu_substitute(A, pm, inverse);
return true;
}
};
} // namespace Kratos.
#endif // KRATOS_LAGRANGIAN_PARTICLES_UTILITIES_INCLUDED defined
|
threading_utils.h | /*!
* Copyright 2015-2019 by Contributors
 * \file threading_utils.h
* \brief Threading utilities
*/
#ifndef XGBOOST_COMMON_THREADING_UTILS_H_
#define XGBOOST_COMMON_THREADING_UTILS_H_
#include <dmlc/common.h>
#include <dmlc/omp.h>
#include <algorithm>
#include <limits>
#include <type_traits> // std::is_signed
#include <vector>
#include "xgboost/logging.h"
#if !defined(_OPENMP)
extern "C" {
inline int32_t omp_get_thread_limit() __GOMP_NOTHROW { return 1; } // NOLINT
}
#endif // !defined(_OPENMP)
// MSVC doesn't implement the thread limit.
#if defined(_OPENMP) && defined(_MSC_VER)
extern "C" {
inline int32_t omp_get_thread_limit() { return std::numeric_limits<int32_t>::max(); } // NOLINT
}
#endif // defined(_MSC_VER)
namespace xgboost {
namespace common {
// Represent simple range of indexes [begin, end)
// Inspired by tbb::blocked_range
class Range1d {
public:
Range1d(size_t begin, size_t end): begin_(begin), end_(end) {
CHECK_LT(begin, end);
}
size_t begin() const { // NOLINT
return begin_;
}
size_t end() const { // NOLINT
return end_;
}
private:
size_t begin_;
size_t end_;
};
// Split 2d space to balanced blocks
// Implementation of the class is inspired by tbb::blocked_range2d
// However, TBB provides only (n x m) 2d range (matrix) separated by blocks. Example:
// [ 1,2,3 ]
// [ 4,5,6 ]
// [ 7,8,9 ]
// But the class is able to work with different sizes in each 'row'. Example:
// [ 1,2 ]
// [ 3,4,5,6 ]
// [ 7,8,9]
// If grain_size is 2, it produces the following blocks:
// [1,2], [3,4], [5,6], [7,8], [9]
// The class helps to process data from several (usually unbalanced) tree
// nodes in parallel: nested parallelism (across nodes and across the data
// within each node) improves CPU utilization.
class BlockedSpace2d {
public:
// Example of space:
// [ 1,2 ]
// [ 3,4,5,6 ]
// [ 7,8,9]
// BlockedSpace2d will create following blocks (tasks) if grain_size=2:
// 1-block: first_dimension = 0, range of indexes in a 'row' = [0,2) (includes [1,2] values)
// 2-block: first_dimension = 1, range of indexes in a 'row' = [0,2) (includes [3,4] values)
// 3-block: first_dimension = 1, range of indexes in a 'row' = [2,4) (includes [5,6] values)
// 4-block: first_dimension = 2, range of indexes in a 'row' = [0,2) (includes [7,8] values)
// 5-block: first_dimension = 2, range of indexes in a 'row' = [2,3) (includes [9] values)
// Arguments:
// dim1 - size of the first dimension in the space
// getter_size_dim2 - functor to get the second dimensions for each 'row' by row-index
// grain_size - max size of produced blocks
template<typename Func>
BlockedSpace2d(size_t dim1, Func getter_size_dim2, size_t grain_size) {
for (size_t i = 0; i < dim1; ++i) {
const size_t size = getter_size_dim2(i);
const size_t n_blocks = size/grain_size + !!(size % grain_size);
for (size_t iblock = 0; iblock < n_blocks; ++iblock) {
const size_t begin = iblock * grain_size;
const size_t end = std::min(begin + grain_size, size);
AddBlock(i, begin, end);
}
}
}
// Number of blocks (tasks) in the space
size_t Size() const {
return ranges_.size();
}
// get index of the first dimension of i-th block(task)
size_t GetFirstDimension(size_t i) const {
CHECK_LT(i, first_dimension_.size());
return first_dimension_[i];
}
// get a range of indexes for the second dimension of i-th block(task)
Range1d GetRange(size_t i) const {
CHECK_LT(i, ranges_.size());
return ranges_[i];
}
private:
void AddBlock(size_t first_dimension, size_t begin, size_t end) {
first_dimension_.push_back(first_dimension);
ranges_.emplace_back(begin, end);
}
std::vector<Range1d> ranges_;
std::vector<size_t> first_dimension_;
};
// Wrapper to implement nested parallelism with simple omp parallel for
template <typename Func>
void ParallelFor2d(const BlockedSpace2d& space, int nthreads, Func func) {
const size_t num_blocks_in_space = space.Size();
CHECK_GE(nthreads, 1);
dmlc::OMPException exc;
#pragma omp parallel num_threads(nthreads)
{
exc.Run([&]() {
size_t tid = omp_get_thread_num();
size_t chunk_size =
num_blocks_in_space / nthreads + !!(num_blocks_in_space % nthreads);
size_t begin = chunk_size * tid;
size_t end = std::min(begin + chunk_size, num_blocks_in_space);
for (auto i = begin; i < end; i++) {
func(space.GetFirstDimension(i), space.GetRange(i));
}
});
}
exc.Rethrow();
}
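// A minimal usage sketch (illustrative only, not part of the API):
// process the rows of a ragged container in blocks of at most 256
// elements with 4 threads.
//
//   std::vector<std::vector<float>> rows = ...;
//   BlockedSpace2d space(
//       rows.size(), [&](size_t i) { return rows[i].size(); }, 256);
//   ParallelFor2d(space, 4, [&](size_t node, Range1d r) {
//     for (size_t j = r.begin(); j < r.end(); ++j) {
//       rows[node][j] *= 2.0f;
//     }
//   });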
/**
* OpenMP schedule
*/
struct Sched {
enum {
kAuto,
kDynamic,
kStatic,
kGuided,
} sched;
size_t chunk{0};
Sched static Auto() { return Sched{kAuto}; }
Sched static Dyn(size_t n = 0) { return Sched{kDynamic, n}; }
Sched static Static(size_t n = 0) { return Sched{kStatic, n}; }
Sched static Guided() { return Sched{kGuided}; }
};
template <typename Index, typename Func>
void ParallelFor(Index size, int32_t n_threads, Sched sched, Func fn) {
#if defined(_MSC_VER)
// msvc doesn't support unsigned integer as openmp index.
using OmpInd = std::conditional_t<std::is_signed<Index>::value, Index, omp_ulong>;
#else
using OmpInd = Index;
#endif
OmpInd length = static_cast<OmpInd>(size);
CHECK_GE(n_threads, 1);
dmlc::OMPException exc;
switch (sched.sched) {
case Sched::kAuto: {
#pragma omp parallel for num_threads(n_threads)
for (OmpInd i = 0; i < length; ++i) {
exc.Run(fn, i);
}
break;
}
case Sched::kDynamic: {
if (sched.chunk == 0) {
#pragma omp parallel for num_threads(n_threads) schedule(dynamic)
for (OmpInd i = 0; i < length; ++i) {
exc.Run(fn, i);
}
} else {
#pragma omp parallel for num_threads(n_threads) schedule(dynamic, sched.chunk)
for (OmpInd i = 0; i < length; ++i) {
exc.Run(fn, i);
}
}
break;
}
case Sched::kStatic: {
if (sched.chunk == 0) {
#pragma omp parallel for num_threads(n_threads) schedule(static)
for (OmpInd i = 0; i < length; ++i) {
exc.Run(fn, i);
}
} else {
#pragma omp parallel for num_threads(n_threads) schedule(static, sched.chunk)
for (OmpInd i = 0; i < length; ++i) {
exc.Run(fn, i);
}
}
break;
}
case Sched::kGuided: {
#pragma omp parallel for num_threads(n_threads) schedule(guided)
for (OmpInd i = 0; i < length; ++i) {
exc.Run(fn, i);
}
break;
}
}
exc.Rethrow();
}
template <typename Index, typename Func>
void ParallelFor(Index size, int32_t n_threads, Func fn) {
ParallelFor(size, n_threads, Sched::Static(), fn);
}
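// Usage sketch (illustrative; 'Expensive' is a placeholder, not a real
// function): pick a guided schedule for an unbalanced loop body.
//
//   std::vector<double> out(n);
//   ParallelFor(n, n_threads, Sched::Guided(),
//               [&](size_t i) { out[i] = Expensive(i); });
//
// The two-argument overload above defaults to a static schedule.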
inline int32_t OmpGetThreadLimit() {
int32_t limit = omp_get_thread_limit();
CHECK_GE(limit, 1) << "Invalid thread limit for OpenMP.";
return limit;
}
inline int32_t OmpGetNumThreads(int32_t n_threads) {
if (n_threads <= 0) {
n_threads = std::min(omp_get_num_procs(), omp_get_max_threads());
}
n_threads = std::min(n_threads, OmpGetThreadLimit());
n_threads = std::max(n_threads, 1);
return n_threads;
}
} // namespace common
} // namespace xgboost
#endif // XGBOOST_COMMON_THREADING_UTILS_H_
|
main.c | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <omp.h>
#define NDEBUG
#ifndef NDEBUG
#define DEBUG(cmd) cmd;
#else
#define DEBUG(cmd) ;
#endif
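// With NDEBUG defined above, DEBUG(cmd) expands to an empty statement;
// remove the '#define NDEBUG' line to re-enable the per-cell trace
// printed inside the multiplication loop.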
#define rows_1 10 // number of rows of left matrix
#define columns_1 5 // number of columns for left matrix
#define rows_2 columns_1 // number of rows for right matrix
#define columns_2 10 // number of columns of right matrix
// function for initialization (rand() must be seeded once by the caller;
// seeding here re-seeded with the same time for every matrix)
double** create_matrix(int rows, int columns)
{
double** matrix = calloc(rows, sizeof(double*));
for (int i = 0; i < rows; i++)
{
matrix[i] = calloc(columns, sizeof(double));
for (int j = 0; j < columns; j++)
{
matrix[i][j] = rand() % 10;
}
}
return matrix;
}
// function for printing
void print_matrix(double** matrix, int rows, int columns)
{
for (int i = 0; i < rows; i++)
{
printf("|");
for (int j = 0; j < columns; j++)
{
printf("%4.0lf|", matrix[i][j]);
}
printf("\n|");
for (int j = 0; j < columns; j++)
{
printf("____|");
}
printf("\n");
}
}
// function to free memory
void free_matrix(double** matrix, int rows)
{
for (int i = 0; i < rows; i++)
{
free(matrix[i]);
}
free(matrix);
}
int main(int argc, char** argv)
{
srand(time(NULL)); // seed once; seeding inside create_matrix produced identical matrices
// Init and print
double** m1 = create_matrix(rows_1, columns_1);
double** m2 = create_matrix(rows_2, columns_2);
double** m3 = create_matrix(rows_1, columns_2);
printf("First matrix\n");
print_matrix(m1, rows_1, columns_1);
printf("\nSecond matrix\n");
print_matrix(m2, rows_2, columns_2);
// Naive algorithm. One thread calculates one cell.
// collapse(2) parallelizes the whole i-j iteration space; the original
// nested '#pragma omp parallel for' opened a second parallel region
// inside the first, oversubscribing threads without adding parallelism.
#pragma omp parallel for schedule(static) collapse(2)
for (int i = 0; i < rows_1; i++)
{
for (int j = 0; j < columns_2; j++)
{
DEBUG(
int thread_id = omp_get_thread_num();
printf("[Thread %.2d] Calculate [%d][%d] cell\n", thread_id, i, j);
)
double sum = 0;
for (int r = 0; r < columns_1; r++)
{
sum += m1[i][r] * m2[r][j];
}
m3[i][j] = sum;
}
}
// Print result and free memory
printf("\nResult matrix\n");
print_matrix(m3, rows_1, columns_2);
free_matrix(m1, rows_1);
free_matrix(m2, rows_2);
free_matrix(m3, rows_1);
return 0;
}
|
dz2z4.c | #include <mpi.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include "timer.h"
char res_seq[100];
char res_par[100];
#define DIM 2 /* Two-dimensional system */
#define X 0 /* x-coordinate subscript */
#define Y 1 /* y-coordinate subscript */
const double G = 6.673e-11;
typedef double vect_t[DIM]; /* Vector type for position, etc. */
// vect_t forces_reduction[4999][5000];
struct particle_s
{
double m; /* Mass */
vect_t s; /* Position */
vect_t v; /* Velocity */
};
int rank, size;
MPI_Datatype particle_s_type;
MPI_Datatype vect_t_type;
enum Tags
{
TAG_CURR = 1000,
TAG_START_IND,
TAG_WORK_FINISH,
TAG_FORCES_SEND,
TAG_FORCES,
};
void Usage(char *prog_name);
void Get_args(int argc, char *argv[], int *n_p, int *n_steps_p,
double *delta_t_p, int *output_freq_p, char *g_i_p);
void Get_init_cond(struct particle_s curr[], int n);
void Gen_init_cond(struct particle_s curr[], int n);
void Output_state(double time, struct particle_s curr[], int n);
void Compute_force(int part, vect_t forces[], struct particle_s curr[],
int n);
void Compute_force_parallel(int part, vect_t forces[], struct particle_s curr[],
int n);
void Update_part(int part, vect_t forces[], struct particle_s curr[],
int n, double delta_t);
void Compute_energy(struct particle_s curr[], int n, double *kin_en_p,
double *pot_en_p);
void sequential_solution(int argc, char *argv[])
{
int n; /* Number of particles */
int n_steps; /* Number of timesteps */
int step; /* Current step */
int part; /* Current particle */
int output_freq; /* Frequency of output */
double delta_t; /* Size of timestep */
double t; /* Current Time */
struct particle_s *curr; /* Current state of system */
vect_t *forces; /* Forces on each particle */
char g_i; /*_G_en or _i_nput init conds */
double kinetic_energy, potential_energy;
double start, finish; /* For timings */
Get_args(argc, argv, &n, &n_steps, &delta_t, &output_freq, &g_i);
curr = malloc(n * sizeof(struct particle_s));
forces = malloc(n * sizeof(vect_t));
if (g_i == 'i')
Get_init_cond(curr, n);
else
Gen_init_cond(curr, n);
GET_TIME(start);
Compute_energy(curr, n, &kinetic_energy, &potential_energy);
printf(" PE = %e, KE = %e, Total Energy = %e\n",
potential_energy, kinetic_energy, kinetic_energy + potential_energy);
Output_state(0, curr, n);
for (step = 1; step <= n_steps; step++)
{
t = step * delta_t;
memset(forces, 0, n * sizeof(vect_t));
for (part = 0; part < n - 1; part++)
Compute_force(part, forces, curr, n);
for (part = 0; part < n; part++)
Update_part(part, forces, curr, n, delta_t);
Compute_energy(curr, n, &kinetic_energy, &potential_energy);
}
Output_state(t, curr, n);
printf(" PE = %e, KE = %e, Total Energy = %e\n",
potential_energy, kinetic_energy, kinetic_energy + potential_energy);
sprintf(res_seq, " PE = %e, KE = %e, Total Energy = %e\n",
potential_energy, kinetic_energy, kinetic_energy + potential_energy);
GET_TIME(finish);
printf("Elapsed time = %e seconds\n", finish - start);
free(curr);
free(forces);
} /* sequential_solution */
void parallel_solution(int argc, char *argv[])
{
int n; /* Number of particles */
int n_steps; /* Number of timesteps */
int step; /* Current step */
int part; /* Current particle */
int output_freq; /* Frequency of output */
double delta_t; /* Size of timestep */
double t; /* Current Time */
struct particle_s *curr; /* Current state of system */
vect_t *forces; /* Forces on each particle */
char g_i; /*_G_en or _i_nput init conds */
double kinetic_energy, potential_energy;
double start, finish; /* For timings */
if (rank == 0)
{
Get_args(argc, argv, &n, &n_steps, &delta_t, &output_freq, &g_i);
curr = malloc(n * sizeof(struct particle_s));
forces = malloc(n * sizeof(vect_t));
if (g_i == 'i')
Get_init_cond(curr, n);
else
Gen_init_cond(curr, n);
GET_TIME(start);
Compute_energy(curr, n, &kinetic_energy, &potential_energy);
printf(" PE = %e, KE = %e, Total Energy = %e\n",
potential_energy, kinetic_energy, kinetic_energy + potential_energy);
Output_state(0, curr, n);
}
MPI_Bcast(&n_steps, 1, MPI_INT, 0, MPI_COMM_WORLD);
MPI_Bcast(&n, 1, MPI_INT, 0, MPI_COMM_WORLD);
MPI_Bcast(&delta_t, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
if (rank != 0)
{
curr = malloc(n * sizeof(struct particle_s));
forces = malloc(n * sizeof(vect_t));
}
vect_t *forces_reduced = malloc(n * sizeof(vect_t));
for (step = 1; step <= n_steps; step++)
{
int part_start, part_end;
int chunk = 50;
int particles_done = 0;
int end_flag = -1;
int current_start = 0;
memset(forces, 0, n * sizeof(vect_t));
MPI_Bcast(curr, n, particle_s_type, 0, MPI_COMM_WORLD);
if (rank == 0)
{
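/* Dynamic master-worker scheduling: rank 0 seeds every worker with the
 * start index of one chunk, then hands out the next chunk whenever a
 * TAG_WORK_FINISH message arrives, and finally sends -1 (end_flag) to
 * release the workers. */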
for (int i = 0; i < size - 1; i++)
{
if (current_start + chunk >= n)
{
MPI_Send(&end_flag, 1, MPI_INT,
i + 1, TAG_START_IND,
MPI_COMM_WORLD);
}
else
{
current_start = i * chunk;
MPI_Send(¤t_start, 1, MPI_INT,
i + 1, TAG_START_IND, MPI_COMM_WORLD);
}
}
while (particles_done < n)
{
int worker_finished;
MPI_Status status;
MPI_Recv(&worker_finished, 1, MPI_INT, MPI_ANY_SOURCE,
TAG_WORK_FINISH, MPI_COMM_WORLD, &status);
particles_done += chunk;
current_start += chunk;
if (current_start < n)
{
MPI_Send(¤t_start, 1, MPI_INT,
status.MPI_SOURCE, TAG_START_IND,
MPI_COMM_WORLD);
}
else
{
MPI_Send(&end_flag, 1, MPI_INT,
status.MPI_SOURCE, TAG_START_IND,
MPI_COMM_WORLD);
}
}
}
else
{
MPI_Recv(¤t_start, 1, MPI_INT, 0, TAG_START_IND, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
while (current_start != -1)
{
int current_end = current_start + chunk;
for (part = current_start; part < current_end; part++)
Compute_force(part, forces, curr, n);
MPI_Send(¤t_start, 1, MPI_INT, 0, TAG_WORK_FINISH, MPI_COMM_WORLD);
MPI_Recv(¤t_start, 1, MPI_INT, 0, TAG_START_IND, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
}
}
MPI_Reduce(forces, forces_reduced, n * 2, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD);
if (rank == 0)
for (part = 0; part < n; part++)
Update_part(part, forces_reduced, curr, n, delta_t);
}
if (rank == 0)
{
t = n_steps * delta_t; /* after the loop, step == n_steps + 1 */
Compute_energy(curr, n, &kinetic_energy, &potential_energy);
Output_state(t, curr, n);
printf(" PE = %e, KE = %e, Total Energy = %e\n",
potential_energy, kinetic_energy, kinetic_energy + potential_energy);
sprintf(res_par, " PE = %e, KE = %e, Total Energy = %e\n",
potential_energy, kinetic_energy, kinetic_energy + potential_energy);
GET_TIME(finish);
printf("Elapsed time = %e seconds\n", finish - start);
}
free(curr);
free(forces);
free(forces_reduced);
} /* parallel_solution */
int compare_results(void)
{
return !strcmp(res_seq, res_par);
}
int main(int argc, char *argv[])
{
MPI_Init(&argc, &argv);
MPI_Comm_size(MPI_COMM_WORLD, &size);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
// define types for communication
MPI_Type_contiguous(sizeof(vect_t) / sizeof(double),
MPI_DOUBLE, &vect_t_type);
MPI_Type_commit(&vect_t_type);
MPI_Type_contiguous(sizeof(struct particle_s) / sizeof(double),
MPI_DOUBLE, &particle_s_type);
MPI_Type_commit(&particle_s_type);
double start_time_seq, end_time_seq, start_time_parallel, end_time_parallel;
if (rank == 0)
{
printf("---------------------Sequential execution---------------------\n");
start_time_seq = MPI_Wtime();
sequential_solution(argc, argv);
end_time_seq = MPI_Wtime();
printf("----------------------Parallel execution----------------------\n");
start_time_parallel = MPI_Wtime();
}
parallel_solution(argc, argv);
if (rank == 0)
{
end_time_parallel = MPI_Wtime();
printf("\nSequential elapsed time: %lfs\n", end_time_seq - start_time_seq);
printf("Parallel elapsed time: %lfs\n", end_time_parallel - start_time_parallel);
if (compare_results())
printf("Test PASSED\n");
else
printf("Test FAILED\n");
}
MPI_Type_free(&vect_t_type);
MPI_Type_free(&particle_s_type);
MPI_Finalize();
return 0;
} /* main */
void Usage(char *prog_name)
{
fprintf(stderr, "usage: %s <number of particles> <number of timesteps>\n",
prog_name);
fprintf(stderr, " <size of timestep> <output frequency>\n");
fprintf(stderr, " <g|i>\n");
fprintf(stderr, " 'g': program should generate init conds\n");
fprintf(stderr, " 'i': program should get init conds from stdin\n");
exit(0);
} /* Usage */
void Get_args(int argc, char *argv[], int *n_p, int *n_steps_p,
double *delta_t_p, int *output_freq_p, char *g_i_p)
{
if (argc != 6)
Usage(argv[0]);
*n_p = strtol(argv[1], NULL, 10);
*n_steps_p = strtol(argv[2], NULL, 10);
*delta_t_p = strtod(argv[3], NULL);
*output_freq_p = strtol(argv[4], NULL, 10);
*g_i_p = argv[5][0];
if (*n_p <= 0 || *n_steps_p < 0 || *delta_t_p <= 0)
Usage(argv[0]);
if (*g_i_p != 'g' && *g_i_p != 'i')
Usage(argv[0]);
} /* Get_args */
void Get_init_cond(struct particle_s curr[], int n)
{
int part;
printf("For each particle, enter (in order):\n");
printf(" its mass, its x-coord, its y-coord, ");
printf("its x-velocity, its y-velocity\n");
for (part = 0; part < n; part++)
{
scanf("%lf", &curr[part].m);
scanf("%lf", &curr[part].s[X]);
scanf("%lf", &curr[part].s[Y]);
scanf("%lf", &curr[part].v[X]);
scanf("%lf", &curr[part].v[Y]);
}
} /* Get_init_cond */
void Gen_init_cond(struct particle_s curr[], int n)
{
int part;
double mass = 5.0e24;
double gap = 1.0e5;
double speed = 3.0e4;
srandom(1);
for (part = 0; part < n; part++)
{
curr[part].m = mass;
curr[part].s[X] = part * gap;
curr[part].s[Y] = 0.0;
curr[part].v[X] = 0.0;
if (part % 2 == 0)
curr[part].v[Y] = speed;
else
curr[part].v[Y] = -speed;
}
} /* Gen_init_cond */
void Output_state(double time, struct particle_s curr[], int n)
{
int part;
printf("%.2f\n", time);
for (part = 0; part < n; part++)
{
printf("%3d %10.3e ", part, curr[part].s[X]);
printf(" %10.3e ", curr[part].s[Y]);
printf(" %10.3e ", curr[part].v[X]);
printf(" %10.3e\n", curr[part].v[Y]);
}
printf("\n");
} /* Output_state */
void Compute_force(int part, vect_t forces[], struct particle_s curr[],
int n)
{
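/* Pairwise gravitational forces using Newton's third law: each (part, k)
 * pair with k > part is visited once; the force is added to 'part' and
 * subtracted from 'k'. This halves the work, but concurrent calls over
 * overlapping index ranges would race on forces[k] (hence the
 * commented-out omp atomic lines). */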
int k;
double mg;
vect_t f_part_k;
double len, len_3, fact;
// #pragma omp parallel for private(f_part_k, len, len_3, mg, fact)
for (k = part + 1; k < n; k++)
{
f_part_k[X] = curr[part].s[X] - curr[k].s[X];
f_part_k[Y] = curr[part].s[Y] - curr[k].s[Y];
len = sqrt(f_part_k[X] * f_part_k[X] + f_part_k[Y] * f_part_k[Y]);
len_3 = len * len * len;
mg = -G * curr[part].m * curr[k].m;
fact = mg / len_3;
f_part_k[X] *= fact;
f_part_k[Y] *= fact;
// #pragma omp atomic
forces[part][X] += f_part_k[X];
// #pragma omp atomic
forces[part][Y] += f_part_k[Y];
forces[k][X] -= f_part_k[X];
forces[k][Y] -= f_part_k[Y];
}
} /* Compute_force */
void Update_part(int part, vect_t forces[], struct particle_s curr[],
int n, double delta_t)
{
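/* Explicit Euler step: advance the position with the current velocity,
 * then the velocity with the acceleration forces/m. */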
double fact = delta_t / curr[part].m;
curr[part].s[X] += delta_t * curr[part].v[X];
curr[part].s[Y] += delta_t * curr[part].v[Y];
curr[part].v[X] += fact * forces[part][X];
curr[part].v[Y] += fact * forces[part][Y];
} /* Update_part */
void Compute_energy(struct particle_s curr[], int n, double *kin_en_p,
double *pot_en_p)
{
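/* Kinetic energy: sum of m|v|^2/2 over particles. Potential energy:
 * sum of -G*m_i*m_j/r_ij over unordered pairs. */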
int i, j;
vect_t diff;
double pe = 0.0, ke = 0.0;
double dist, speed_sqr;
for (i = 0; i < n; i++)
{
speed_sqr = curr[i].v[X] * curr[i].v[X] + curr[i].v[Y] * curr[i].v[Y];
ke += curr[i].m * speed_sqr;
}
ke *= 0.5;
for (i = 0; i < n - 1; i++)
{
for (j = i + 1; j < n; j++)
{
diff[X] = curr[i].s[X] - curr[j].s[X];
diff[Y] = curr[i].s[Y] - curr[j].s[Y];
dist = sqrt(diff[X] * diff[X] + diff[Y] * diff[Y]);
pe += -G * curr[i].m * curr[j].m / dist;
}
}
*kin_en_p = ke;
*pot_en_p = pe;
} /* Compute_energy */
|
DRB016-outputdep-orig-yes.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
The loop in this example cannot be parallelized.
This pattern has two pairs of dependencies:
1. loop carried output dependence
x = .. :
2. loop carried true dependence due to:
.. = x;
x = ..;
Data race pairs: we allow two pairs to preserve the original code pattern.
1. x@73:12 vs. x@74:5
2. x@74:5 vs. x@74:5
*/
#include <stdio.h>
int a[100];
int main()
{
int len=100;
int i,x=10;
#pragma omp parallel for schedule(dynamic)
for (i=0;i<len;i++)
{
a[i] = x;
x=i;
}
printf("x=%d",x);
return 0;
}
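/* A race-free rewrite (a sketch for illustration, not part of the
 * benchmark): sequentially the loop produces a[0] = 10 and
 * a[i] = i - 1 for i >= 1, with x = len - 1 afterwards, so the
 * loop-carried dependences can be removed entirely:
 *
 *   a[0] = x;
 *   #pragma omp parallel for
 *   for (i = 1; i < len; i++)
 *     a[i] = i - 1;
 *   x = len - 1;
 */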
|
generated-funcs.c | // Check that the CHECK lines are generated for clang-generated functions
// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fopenmp %s -emit-llvm -o - | FileCheck --check-prefix=OMP %s
// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu %s -emit-llvm -o - | FileCheck --check-prefix=NOOMP %s
const int size = 1024 * 1024 * 32;
double A[size];
void foo(void);
int main(void) {
int i = 0;
#pragma omp parallel for
for (i = 0; i < size; ++i) {
A[i] = 0.0;
}
foo();
return 0;
}
void foo(void) {
int i = 0;
#pragma omp parallel for
for (i = 0; i < size; ++i) {
A[i] = 1.0;
}
}
|
itunes_fmt_plug.c | /* JtR format to crack encrypted iTunes Backup passwords.
*
* This software is Copyright (c) 2017, Dhiru Kholia <dhiru at openwall.com>
* and it is hereby released to the general public under the following terms:
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted.
*
* All credit goes to Jean-Baptiste Bédrune, Jean Sigwald, DinoSec, philsmd,
* and Andrew Neitsch.
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_itunes;
#elif FMT_REGISTERS_H
john_register_one(&fmt_itunes);
#else
#include <string.h>
#include <assert.h>
#include <errno.h>
#include <openssl/des.h>
#ifdef _OPENMP
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE 1
#endif
#endif
#include "arch.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "params.h"
#include "options.h"
#include "johnswap.h"
#include "pbkdf2_hmac_sha1.h"
#include "pbkdf2_hmac_sha256.h"
#include "jumbo.h"
#include "memdbg.h"
#include "itunes_common.h"
#define FORMAT_LABEL "itunes-backup"
#ifdef SIMD_COEF_32
#define ALGORITHM_NAME "PBKDF2-SHA1 AES " SHA1_ALGORITHM_NAME
#else
#define ALGORITHM_NAME "PBKDF2-SHA1 AES 32/" ARCH_BITS_STR
#endif
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
#define BINARY_SIZE 0
#define PLAINTEXT_LENGTH 125
#define SALT_SIZE sizeof(struct custom_salt)
#define BINARY_ALIGN 1
#define SALT_ALIGN sizeof(int)
#ifdef SIMD_COEF_32
#define MIN_KEYS_PER_CRYPT (SSE_GROUP_SZ_SHA1 * SSE_GROUP_SZ_SHA256)
#define MAX_KEYS_PER_CRYPT (SSE_GROUP_SZ_SHA1 * SSE_GROUP_SZ_SHA256)
#else
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#endif
static struct fmt_tests itunes_tests[] = {
// real iTunes 9.x hash
{"$itunes_backup$*9*bc707ac0151660426c8114d04caad9d9ee2678a7b7ab05c18ee50cafb2613c31c8978e8b1e9cad2a*10000*266343aaf99102ba7f6af64a3a2d62637793f753**", "123456"},
// artificial hashes generated by hashcat's tools/test.pl script with a low dpic (iterations) value to ease testing
{"$itunes_backup$*10*31021f9c5a705c3625af21739d397082d90f7a00718a9307687625abc35fc3e4d78371e95cc708b6*10000*8840233131165307147445064802216857558435*1000*c77a159b325d10efee51a1c05701ef63fb85b599", "855632538858211"},
{"$itunes_backup$*10*b3d3f05b5367345fcb654b9b628e2ed24d8b8726f1f74707a956c776475d6ebfffc962340d9cbbca*10000*6832814730342072666684158073107301064276*1000*46de5e844e0ee1c81d2cca6acefb77789c1a7cd0", "1"},
// real iTunes 9.x hash
{"$itunes_backup$*9*06dc04bca4eeea2fbc1bc7356fa758243bead479673640a668db285c8f48c402cc435539d935509e*10000*37d2bd7caefbb24a9729e41a3257ef06188dc01e**", "test123"},
// {"$itunes_backup$*10*deff6d646eb1fa2b6741efee8b70eda84341a838cef2bb10e582669759d7e33c399a0ba2a52cb9ec*10000*f09cfa82cc1695657cb2c347ee127c2523795fda*10000000*66f159e15f3ddbbdd4057f8babef7ad4472fac10", "test123"}, // real hash, this is very very slow!
{NULL}
};
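/* Hedged reading of the test vectors above (inferred, not authoritative):
 * the fields appear to be
 *   $itunes_backup$*version*wpky*iterations*salt*dpic*dpsl
 * with the trailing dpic/dpsl pair only populated for version 10 backups,
 * where they drive the extra PBKDF2-SHA256 round in crypt_all() below. */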
#if defined (_OPENMP)
static int omp_t = 1;
#endif
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static int *cracked, cracked_count;
static struct custom_salt *cur_salt;
static void init(struct fmt_main *self)
{
#if defined (_OPENMP)
omp_t = omp_get_max_threads();
self->params.min_keys_per_crypt *= omp_t;
omp_t *= OMP_SCALE;
self->params.max_keys_per_crypt *= omp_t;
#endif
saved_key = mem_calloc(sizeof(*saved_key), self->params.max_keys_per_crypt);
cracked = mem_calloc(sizeof(*cracked), self->params.max_keys_per_crypt);
cracked_count = self->params.max_keys_per_crypt;
}
static void done(void)
{
MEM_FREE(cracked);
MEM_FREE(saved_key);
}
static void set_salt(void *salt)
{
cur_salt = (struct custom_salt *)salt;
}
static void itunes_set_key(char *key, int index)
{
int saved_len = strlen(key);
if (saved_len > PLAINTEXT_LENGTH)
saved_len = PLAINTEXT_LENGTH;
memcpy(saved_key[index], key, saved_len);
saved_key[index][saved_len] = 0;
}
static char *get_key(int index)
{
return saved_key[index];
}
static int crypt_all(int *pcount, struct db_salt *salt)
{
const int count = *pcount;
int index = 0;
memset(cracked, 0, sizeof(cracked[0])*cracked_count);
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (index = 0; index < count; index += MAX_KEYS_PER_CRYPT)
{
unsigned char master[MAX_KEYS_PER_CRYPT][32];
int i;
if (cur_salt->version == 9) { // iTunes Backup < 10
#ifdef SIMD_COEF_32
int lens[MAX_KEYS_PER_CRYPT];
unsigned char *pin[MAX_KEYS_PER_CRYPT], *pout[MAX_KEYS_PER_CRYPT];
int loops = MAX_KEYS_PER_CRYPT / SSE_GROUP_SZ_SHA1;
for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) {
lens[i] = strlen(saved_key[index+i]);
pin[i] = (unsigned char*)saved_key[index+i];
pout[i] = master[i];
}
for (i = 0; i < loops; i++)
pbkdf2_sha1_sse((const unsigned char**)(pin + i * SSE_GROUP_SZ_SHA1), &lens[i * SSE_GROUP_SZ_SHA1], cur_salt->salt, SALTLEN, cur_salt->iterations, pout + (i * SSE_GROUP_SZ_SHA1), 32, 0);
#else
for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i)
pbkdf2_sha1((unsigned char *)saved_key[index+i], strlen(saved_key[index+i]), cur_salt->salt, SALTLEN, cur_salt->iterations, master[i], 32, 0);
#endif
for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) {
cracked[index+i] = itunes_common_decrypt(cur_salt, master[i]);
}
} else { // iTunes Backup 10.x
#if defined(SIMD_COEF_64) && defined(SIMD_COEF_32)
int lens[MAX_KEYS_PER_CRYPT];
unsigned char *pin[MAX_KEYS_PER_CRYPT], *pout[MAX_KEYS_PER_CRYPT];
int loops = MAX_KEYS_PER_CRYPT / SSE_GROUP_SZ_SHA256;
for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) {
lens[i] = strlen(saved_key[index+i]);
pin[i] = (unsigned char*)saved_key[index+i];
pout[i] = master[i];
}
for (i = 0; i < loops; i++)
pbkdf2_sha256_sse((const unsigned char**)(pin + i * SSE_GROUP_SZ_SHA256), &lens[i * SSE_GROUP_SZ_SHA256], cur_salt->dpsl, SALTLEN, cur_salt->dpic, pout + (i * SSE_GROUP_SZ_SHA256), 32, 0);
for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) {
lens[i] = 32;
pin[i] = (unsigned char*)master[i];
pout[i] = master[i];
}
loops = MAX_KEYS_PER_CRYPT / SSE_GROUP_SZ_SHA1;
for (i = 0; i < loops; i++)
pbkdf2_sha1_sse((const unsigned char**)(pin + i * SSE_GROUP_SZ_SHA1), &lens[i * SSE_GROUP_SZ_SHA1], cur_salt->salt, SALTLEN, cur_salt->iterations, pout + (i * SSE_GROUP_SZ_SHA1), 32, 0);
#else
for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) {
pbkdf2_sha256((unsigned char *)saved_key[index+i], strlen(saved_key[index+i]), cur_salt->dpsl, SALTLEN, cur_salt->dpic, master[i], 32, 0);
pbkdf2_sha1(master[i], 32, cur_salt->salt, SALTLEN, cur_salt->iterations, master[i], 32, 0);
}
#endif
for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) {
cracked[index+i] = itunes_common_decrypt(cur_salt, master[i]);
}
}
}
return count;
}
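/* Hedged summary (comment added for clarity, not in the original file): for
 * version 9 the key is master = PBKDF2-SHA1(password, salt, iterations);
 * for version >= 10 the derivation is chained as
 *   master = PBKDF2-SHA1(PBKDF2-SHA256(password, dpsl, dpic), salt, iterations),
 * matching the two-stage code above. */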
static int cmp_all(void *binary, int count)
{
int index;
for (index = 0; index < count; index++)
if (cracked[index])
return 1;
return 0;
}
static int cmp_one(void *binary, int index)
{
return cracked[index];
}
static int cmp_exact(char *source, int index)
{
return 1;
}
struct fmt_main fmt_itunes = {
{
FORMAT_LABEL,
FORMAT_NAME,
ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
0,
PLAINTEXT_LENGTH,
BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
FMT_CASE | FMT_8_BIT | FMT_OMP,
{
"version",
"iteration count",
},
{ FORMAT_TAG },
itunes_tests
}, {
init,
done,
fmt_default_reset,
fmt_default_prepare,
itunes_common_valid,
fmt_default_split,
fmt_default_binary,
itunes_common_get_salt,
{
itunes_common_tunable_version,
itunes_common_tunable_iterations,
},
fmt_default_source,
{
fmt_default_binary_hash
},
fmt_default_salt_hash,
NULL,
set_salt,
itunes_set_key,
get_key,
fmt_default_clear_keys,
crypt_all,
{
fmt_default_get_hash
},
cmp_all,
cmp_one,
cmp_exact
}
};
#endif /* plugin stanza */
|
critical.c | #include <stdio.h>
#include <stdlib.h>
#include <omp.h>
void critical_example() {
#pragma omp critical
{ printf("Hello World from thread %d!\n", omp_get_thread_num()); }
}
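/* Hedged sketch (hypothetical helper, not in the original file): beyond
   serializing I/O, a critical section is the simplest way to protect a
   shared read-modify-write that is not expressible as a reduction. */
void counter_example() {
int shared_count = 0;
#pragma omp parallel
{
#pragma omp critical
{ shared_count += 1; } /* one thread at a time updates shared_count */
}
printf("threads seen: %d\n", shared_count);
}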
int main(void) {
#pragma omp parallel
{ critical_example(); }
return EXIT_SUCCESS;
} |
tree.h | #ifndef LIGHTGBM_TREE_H_
#define LIGHTGBM_TREE_H_
#include <LightGBM/meta.h>
#include <LightGBM/dataset.h>
#include <string>
#include <vector>
#include <memory>
#include <map>
#include <unordered_map>
#include <cstdint>
namespace LightGBM {
#define kMaxTreeOutput (100)
#define kCategoricalMask (1)
#define kDefaultLeftMask (2)
/*!
* \brief Tree model
*/
class Tree {
public:
/*!
* \brief Constructor
* \param max_leaves The number of max leaves
*/
explicit Tree(int max_leaves);
/*!
* \brief Constructor, from a string
* \param str Model string
* \param used_len used count of str
*/
Tree(const char* str, size_t* used_len);
~Tree();
/*!
* \brief Performing a split on tree leaves.
* \param leaf Index of leaf to be split
* \param feature Index of feature; the converted index after removing useless features
* \param real_feature Index of feature, the original index on data
* \param threshold_bin Threshold(bin) of split
* \param threshold_double Threshold on feature value
* \param left_value Model Left child output
* \param right_value Model Right child output
* \param left_cnt Count of left child
* \param right_cnt Count of right child
* \param gain Split gain
* \param missing_type missing type
* \param default_left default direction for missing value
* \return The index of new leaf.
*/
int Split(int leaf, int feature, int real_feature, uint32_t threshold_bin,
double threshold_double, double left_value, double right_value,
int left_cnt, int right_cnt, float gain, MissingType missing_type, bool default_left);
/*!
* \brief Performing a split on tree leaves, with categorical feature
* \param leaf Index of leaf to be split
* \param feature Index of feature; the converted index after removing useless features
* \param real_feature Index of feature, the original index on data
* \param threshold_bin Threshold(bin) of split, use bitset to represent
* \param num_threshold_bin size of threshold_bin
* \param threshold Thresholds of real feature value, use bitset to represent
* \param num_threshold size of threshold
* \param left_value Model Left child output
* \param right_value Model Right child output
* \param left_cnt Count of left child
* \param right_cnt Count of right child
* \param gain Split gain
* \return The index of new leaf.
*/
int SplitCategorical(int leaf, int feature, int real_feature, const uint32_t* threshold_bin, int num_threshold_bin,
const uint32_t* threshold, int num_threshold, double left_value, double right_value,
int left_cnt, int right_cnt, float gain, MissingType missing_type);
/*! \brief Get the output of one leaf */
inline double LeafOutput(int leaf) const { return leaf_value_[leaf]; }
/*! \brief Set the output of one leaf */
inline void SetLeafOutput(int leaf, double output) {
leaf_value_[leaf] = output;
}
/*!
* \brief Adding prediction value of this tree model to scores
* \param data The dataset
* \param num_data Number of total data
* \param score Will add prediction to score
*/
void AddPredictionToScore(const Dataset* data,
data_size_t num_data,
double* score) const;
/*!
* \brief Adding prediction value of this tree model to scores
* \param data The dataset
* \param used_data_indices Indices of used data
* \param num_data Number of total data
* \param score Will add prediction to score
*/
void AddPredictionToScore(const Dataset* data,
const data_size_t* used_data_indices,
data_size_t num_data, double* score) const;
/*!
* \brief Prediction on one record
* \param feature_values Feature value of this record
* \return Prediction result
*/
inline double Predict(const double* feature_values) const;
inline double PredictByMap(const std::unordered_map<int, double>& feature_values) const;
inline int PredictLeafIndex(const double* feature_values) const;
inline int PredictLeafIndexByMap(const std::unordered_map<int, double>& feature_values) const;
inline void PredictContrib(const double* feature_values, int num_features, double* output);
/*! \brief Get Number of leaves*/
inline int num_leaves() const { return num_leaves_; }
/*! \brief Get depth of specific leaf*/
inline int leaf_depth(int leaf_idx) const { return leaf_depth_[leaf_idx]; }
/*! \brief Get feature of specific split*/
inline int split_feature(int split_idx) const { return split_feature_[split_idx]; }
inline double split_gain(int split_idx) const { return split_gain_[split_idx]; }
/*! \brief Get the number of data points that fall at or below this node*/
inline int data_count(int node) const { return node >= 0 ? internal_count_[node] : leaf_count_[~node]; }
/*!
* \brief Shrinkage for the tree's output
* shrinkage rate (a.k.a. learning rate) is used to tune the training process
* \param rate The factor of shrinkage
*/
inline void Shrinkage(double rate) {
#pragma omp parallel for schedule(static, 1024) if (num_leaves_ >= 2048)
for (int i = 0; i < num_leaves_; ++i) {
leaf_value_[i] *= rate;
if (leaf_value_[i] > kMaxTreeOutput) { leaf_value_[i] = kMaxTreeOutput; } else if (leaf_value_[i] < -kMaxTreeOutput) { leaf_value_[i] = -kMaxTreeOutput; }
}
shrinkage_ *= rate;
}
inline double shrinkage() const {
return shrinkage_;
}
inline void AddBias(double val) {
#pragma omp parallel for schedule(static, 1024) if (num_leaves_ >= 2048)
for (int i = 0; i < num_leaves_; ++i) {
leaf_value_[i] = val + leaf_value_[i];
}
// force to 1.0
shrinkage_ = 1.0f;
}
inline void AsConstantTree(double val) {
num_leaves_ = 1;
shrinkage_ = 1.0f;
leaf_value_[0] = val;
}
/*! \brief Serialize this object to string*/
std::string ToString() const;
/*! \brief Serialize this object to json*/
std::string ToJSON() const;
/*! \brief Serialize this object to if-else statement*/
std::string ToIfElse(int index, bool is_predict_leaf_index) const;
inline static bool IsZero(double fval) {
if (fval > -kZeroThreshold && fval <= kZeroThreshold) {
return true;
} else {
return false;
}
}
inline static bool GetDecisionType(int8_t decision_type, int8_t mask) {
return (decision_type & mask) > 0;
}
inline static void SetDecisionType(int8_t* decision_type, bool input, int8_t mask) {
if (input) {
(*decision_type) |= mask;
} else {
(*decision_type) &= (127 - mask);
}
}
inline static int8_t GetMissingType(int8_t decision_type) {
return (decision_type >> 2) & 3;
}
inline static void SetMissingType(int8_t* decision_type, int8_t input) {
(*decision_type) &= 3;
(*decision_type) |= (input << 2);
}
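// Hedged illustration (not in the original header) of the decision_type_
// bit layout: bit 0 = categorical split, bit 1 = default-left direction,
// bits 2-3 = missing type. For example:
//   int8_t dt = 0;
//   SetDecisionType(&dt, true, kCategoricalMask);  // dt == 0b0001
//   SetDecisionType(&dt, true, kDefaultLeftMask);  // dt == 0b0011
//   SetMissingType(&dt, 2);                        // dt == 0b1011
//   GetMissingType(dt);                            // returns 2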
private:
std::string NumericalDecisionIfElse(int node) const;
std::string CategoricalDecisionIfElse(int node) const;
inline int NumericalDecision(double fval, int node) const {
uint8_t missing_type = GetMissingType(decision_type_[node]);
if (std::isnan(fval)) {
if (missing_type != 2) {
fval = 0.0f;
}
}
if ((missing_type == 1 && IsZero(fval))
|| (missing_type == 2 && std::isnan(fval))) {
if (GetDecisionType(decision_type_[node], kDefaultLeftMask)) {
return left_child_[node];
} else {
return right_child_[node];
}
}
if (fval <= threshold_[node]) {
return left_child_[node];
} else {
return right_child_[node];
}
}
inline int NumericalDecisionInner(uint32_t fval, int node, uint32_t default_bin, uint32_t max_bin) const {
uint8_t missing_type = GetMissingType(decision_type_[node]);
if ((missing_type == 1 && fval == default_bin)
|| (missing_type == 2 && fval == max_bin)) {
if (GetDecisionType(decision_type_[node], kDefaultLeftMask)) {
return left_child_[node];
} else {
return right_child_[node];
}
}
if (fval <= threshold_in_bin_[node]) {
return left_child_[node];
} else {
return right_child_[node];
}
}
inline int CategoricalDecision(double fval, int node) const {
uint8_t missing_type = GetMissingType(decision_type_[node]);
int int_fval = static_cast<int>(fval);
if (int_fval < 0) {
return right_child_[node];
} else if (std::isnan(fval)) {
// NaN is always in the right
if (missing_type == 2) {
return right_child_[node];
}
int_fval = 0;
}
int cat_idx = int(threshold_[node]);
if (Common::FindInBitset(cat_threshold_.data() + cat_boundaries_[cat_idx],
cat_boundaries_[cat_idx + 1] - cat_boundaries_[cat_idx], int_fval)) {
return left_child_[node];
}
return right_child_[node];
}
inline int CategoricalDecisionInner(uint32_t fval, int node) const {
int cat_idx = int(threshold_in_bin_[node]);
if (Common::FindInBitset(cat_threshold_inner_.data() + cat_boundaries_inner_[cat_idx],
cat_boundaries_inner_[cat_idx + 1] - cat_boundaries_inner_[cat_idx], fval)) {
return left_child_[node];
}
return right_child_[node];
}
inline int Decision(double fval, int node) const {
if (GetDecisionType(decision_type_[node], kCategoricalMask)) {
return CategoricalDecision(fval, node);
} else {
return NumericalDecision(fval, node);
}
}
inline int DecisionInner(uint32_t fval, int node, uint32_t default_bin, uint32_t max_bin) const {
if (GetDecisionType(decision_type_[node], kCategoricalMask)) {
return CategoricalDecisionInner(fval, node);
} else {
return NumericalDecisionInner(fval, node, default_bin, max_bin);
}
}
inline void Split(int leaf, int feature, int real_feature,
double left_value, double right_value, int left_cnt, int right_cnt, float gain);
/*!
* \brief Find leaf index of which record belongs by features
* \param feature_values Feature value of this record
* \return Leaf index
*/
inline int GetLeaf(const double* feature_values) const;
inline int GetLeafByMap(const std::unordered_map<int, double>& feature_values) const;
/*! \brief Serialize one node to json*/
std::string NodeToJSON(int index) const;
/*! \brief Serialize one node to if-else statement*/
std::string NodeToIfElse(int index, bool is_predict_leaf_index) const;
std::string NodeToIfElseByMap(int index, bool is_predict_leaf_index) const;
double ExpectedValue() const;
int MaxDepth();
/*! \brief This is used to fill in leaf_depth_ after reloading a model*/
inline void RecomputeLeafDepths(int node = 0, int depth = 0);
/*!
* \brief Used by TreeSHAP for data we keep about our decision path
*/
struct PathElement {
int feature_index;
double zero_fraction;
double one_fraction;
// note that pweight is included for convenience and is not tied to the other attributes;
// the pweight of the i'th path element is the permutation weight of paths with i-1 ones in them
double pweight;
PathElement() {}
PathElement(int i, double z, double o, double w) : feature_index(i), zero_fraction(z), one_fraction(o), pweight(w) {}
};
/*! \brief Polynomial time algorithm for SHAP values (https://arxiv.org/abs/1706.06060) */
void TreeSHAP(const double *feature_values, double *phi,
int node, int unique_depth,
PathElement *parent_unique_path, double parent_zero_fraction,
double parent_one_fraction, int parent_feature_index) const;
/*! \brief Extend our decision path with a fraction of one and zero extensions for TreeSHAP*/
static void ExtendPath(PathElement *unique_path, int unique_depth,
double zero_fraction, double one_fraction, int feature_index);
/*! \brief Undo a previous extension of the decision path for TreeSHAP*/
static void UnwindPath(PathElement *unique_path, int unique_depth, int path_index);
/*! \brief Determine what the total permutation weight would be if we unwound a previous extension in the decision path*/
static double UnwoundPathSum(const PathElement *unique_path, int unique_depth, int path_index);
/*! \brief Number of max leaves*/
int max_leaves_;
/*! \brief Number of current leaves*/
int num_leaves_;
// following values used for non-leaf node
/*! \brief A non-leaf node's left child */
std::vector<int> left_child_;
/*! \brief A non-leaf node's right child */
std::vector<int> right_child_;
/*! \brief A non-leaf node's split feature */
std::vector<int> split_feature_inner_;
/*! \brief A non-leaf node's split feature, the original index */
std::vector<int> split_feature_;
/*! \brief A non-leaf node's split threshold in bin */
std::vector<uint32_t> threshold_in_bin_;
/*! \brief A non-leaf node's split threshold in feature value */
std::vector<double> threshold_;
int num_cat_;
std::vector<int> cat_boundaries_inner_;
std::vector<uint32_t> cat_threshold_inner_;
std::vector<int> cat_boundaries_;
std::vector<uint32_t> cat_threshold_;
/*! \brief Store the information for categorical feature handling and missing value handling. */
std::vector<int8_t> decision_type_;
/*! \brief A non-leaf node's split gain */
std::vector<float> split_gain_;
// used for leaf node
/*! \brief The parent of leaf */
std::vector<int> leaf_parent_;
/*! \brief Output of leaves */
std::vector<double> leaf_value_;
/*! \brief DataCount of leaves */
std::vector<int> leaf_count_;
/*! \brief Output of non-leaf nodes */
std::vector<double> internal_value_;
/*! \brief DataCount of non-leaf nodes */
std::vector<int> internal_count_;
/*! \brief Depth for leaves */
std::vector<int> leaf_depth_;
double shrinkage_;
};
inline void Tree::Split(int leaf, int feature, int real_feature,
double left_value, double right_value, int left_cnt, int right_cnt, float gain) {
int new_node_idx = num_leaves_ - 1;
// update parent info
int parent = leaf_parent_[leaf];
if (parent >= 0) {
// if cur node is left child
if (left_child_[parent] == ~leaf) {
left_child_[parent] = new_node_idx;
} else {
right_child_[parent] = new_node_idx;
}
}
// add new node
split_feature_inner_[new_node_idx] = feature;
split_feature_[new_node_idx] = real_feature;
split_gain_[new_node_idx] = Common::AvoidInf(gain);
// add two new leaves
left_child_[new_node_idx] = ~leaf;
right_child_[new_node_idx] = ~num_leaves_;
// update new leaves
leaf_parent_[leaf] = new_node_idx;
leaf_parent_[num_leaves_] = new_node_idx;
// save current leaf value to internal node before change
internal_value_[new_node_idx] = leaf_value_[leaf];
internal_count_[new_node_idx] = left_cnt + right_cnt;
leaf_value_[leaf] = std::isnan(left_value) ? 0.0f : left_value;
leaf_count_[leaf] = left_cnt;
leaf_value_[num_leaves_] = std::isnan(right_value) ? 0.0f : right_value;
leaf_count_[num_leaves_] = right_cnt;
// update leaf depth
leaf_depth_[num_leaves_] = leaf_depth_[leaf] + 1;
leaf_depth_[leaf]++;
}
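// Hedged note (comment added for clarity, not in the original header): child
// indices use a bitwise-complement encoding -- an entry >= 0 names an
// internal node, while a negative entry n refers to leaf ~n. This is why
// GetLeaf()/GetLeafByMap() below loop while node >= 0 and return ~node.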
inline double Tree::Predict(const double* feature_values) const {
if (num_leaves_ > 1) {
int leaf = GetLeaf(feature_values);
return LeafOutput(leaf);
} else {
return leaf_value_[0];
}
}
inline double Tree::PredictByMap(const std::unordered_map<int, double>& feature_values) const {
if (num_leaves_ > 1) {
int leaf = GetLeafByMap(feature_values);
return LeafOutput(leaf);
} else {
return leaf_value_[0];
}
}
inline int Tree::PredictLeafIndex(const double* feature_values) const {
if (num_leaves_ > 1) {
int leaf = GetLeaf(feature_values);
return leaf;
} else {
return 0;
}
}
inline int Tree::PredictLeafIndexByMap(const std::unordered_map<int, double>& feature_values) const {
if (num_leaves_ > 1) {
int leaf = GetLeafByMap(feature_values);
return leaf;
} else {
return 0;
}
}
inline void Tree::PredictContrib(const double* feature_values, int num_features, double* output) {
output[num_features] += ExpectedValue();
// Run the recursion with preallocated space for the unique path data
if (num_leaves_ > 1) {
const int max_path_len = MaxDepth() + 1;
PathElement *unique_path_data = new PathElement[(max_path_len*(max_path_len + 1)) / 2];
TreeSHAP(feature_values, output, 0, 0, unique_path_data, 1, 1, -1);
delete[] unique_path_data;
}
}
inline void Tree::RecomputeLeafDepths(int node, int depth) {
if (node == 0) leaf_depth_.resize(num_leaves());
if (node < 0) {
leaf_depth_[~node] = depth;
} else {
RecomputeLeafDepths(left_child_[node], depth + 1);
RecomputeLeafDepths(right_child_[node], depth + 1);
}
}
inline int Tree::GetLeaf(const double* feature_values) const {
int node = 0;
if (num_cat_ > 0) {
while (node >= 0) {
node = Decision(feature_values[split_feature_[node]], node);
}
} else {
while (node >= 0) {
node = NumericalDecision(feature_values[split_feature_[node]], node);
}
}
return ~node;
}
inline int Tree::GetLeafByMap(const std::unordered_map<int, double>& feature_values) const {
int node = 0;
if (num_cat_ > 0) {
while (node >= 0) {
node = Decision(feature_values.count(split_feature_[node]) > 0 ? feature_values.at(split_feature_[node]) : 0.0f, node);
}
} else {
while (node >= 0) {
node = NumericalDecision(feature_values.count(split_feature_[node]) > 0 ? feature_values.at(split_feature_[node]) : 0.0f, node);
}
}
return ~node;
}
} // namespace LightGBM
#endif // LightGBM_TREE_H_
|
owl_matrix_swap_impl_omp.h | /*
* OWL - OCaml Scientific and Engineering Computing
* Copyright (c) 2016-2022 Liang Wang <liang@ocaml.xyz>
*/
#ifdef OWL_ENABLE_TEMPLATE
// swap row i and row j in x(m,n)
void FUNCTION (c, swap_rows) (TYPE *x, int m, int n, int i, int j) {
if (i != j) {
TYPE * src = x + n * i;
TYPE * dst = x + n * j;
if (n >= OWL_OMP_THRESHOLD_DEFAULT) {
#pragma omp parallel for schedule(static)
for (int k = 0; k < n; k++) {
TYPE t = *(src + k);
*(src + k) = *(dst + k);
*(dst + k) = t;
}
}
else {
for (int k = 0; k < n; k++) {
TYPE t = *(src + k);
*(src + k) = *(dst + k);
*(dst + k) = t;
}
}
}
}
// stub function of swap_rows
CAMLprim value FUNCTION (stub, swap_rows) (value vX, value vM, value vN, value vI, value vJ) {
struct caml_ba_array *X = Caml_ba_array_val(vX);
TYPE *X_data = (TYPE *) X->data;
int m = Long_val(vM);
int n = Long_val(vN);
int i = Long_val(vI);
int j = Long_val(vJ);
FUNCTION (c, swap_rows) (X_data, m, n, i, j);
return Val_unit;
}
// swap column i and column j in x(m,n)
void FUNCTION (c, swap_cols) (TYPE *x, int m, int n, int i, int j) {
if (i != j) {
TYPE * src = x + i;
TYPE * dst = x + j;
if (m >= OWL_OMP_THRESHOLD_DEFAULT) {
#pragma omp parallel for schedule(static)
for (int k = 0; k < m; k++) {
int base = k * n;
TYPE t = *(src + base);
*(src + base) = *(dst + base);
*(dst + base) = t;
}
}
else {
int base = 0;
for (int k = 0; k < m; k++) {
TYPE t = *(src + base);
*(src + base) = *(dst + base);
*(dst + base) = t;
base += n;
}
}
}
}
// stub function of swap_cols
CAMLprim value FUNCTION (stub, swap_cols) (value vX, value vM, value vN, value vI, value vJ) {
struct caml_ba_array *X = Caml_ba_array_val(vX);
TYPE *X_data = (TYPE *) X->data;
int m = Long_val(vM);
int n = Long_val(vN);
int i = Long_val(vI);
int j = Long_val(vJ);
FUNCTION (c, swap_cols) (X_data, m, n, i, j);
return Val_unit;
}
// transpose x(m,n) and save to y(n,m)
void FUNCTION (c, transpose) (TYPE *x, TYPE *y, int m, int n) {
int ofsx = 0;
int ofsy = 0;
if (m >= OWL_OMP_THRESHOLD_DEFAULT / 100) {
#pragma omp parallel for schedule(static)
for (int i = 0; i < m; i++) {
for (int j = 0; j < n; j++) {
*(y + i + j * m) = *(x + j + i * n);
}
}
}
else {
for (int i = 0; i < m; i++) {
ofsy = i;
for (int j = 0; j < n; j++) {
*(y + ofsy) = *(x + ofsx);
ofsy += m;
ofsx += 1;
}
}
}
}
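/* Hedged note (comment added for clarity, not in the original source): the
 * parallel branch uses the gather form y[i + j*m] = x[j + i*n] instead of
 * the running ofsx/ofsy offsets of the serial branch, because those offsets
 * carry a loop dependence that would not be safe across OpenMP threads. */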
// stub function of transpose
CAMLprim value FUNCTION (stub, transpose) (value vX, value vY) {
struct caml_ba_array *X = Caml_ba_array_val(vX);
TYPE *X_data = (TYPE *) X->data;
struct caml_ba_array *Y = Caml_ba_array_val(vY);
TYPE *Y_data = (TYPE *) Y->data;
FUNCTION (c, transpose) (X_data, Y_data, X->dim[0], X->dim[1]);
return Val_unit;
}
// conjugate transpose x(m,n) and save to y(n,m)
void FUNCTION (c, ctranspose) (TYPE *x, TYPE *y, int m, int n) {
int ofsx = 0;
int ofsy = 0;
if (m >= OWL_OMP_THRESHOLD_DEFAULT / 100) {
#pragma omp parallel for schedule(static)
for (int i = 0; i < m; i++) {
for (int j = 0; j < n; j++) {
*(y + i + j * m) = CONJ_FUN(*(x + j + i * n));
}
}
}
else {
for (int i = 0; i < m; i++) {
ofsy = i;
for (int j = 0; j < n; j++) {
*(y + ofsy) = CONJ_FUN(*(x + ofsx));
ofsy += m;
ofsx += 1;
}
}
}
}
// stub function of ctranspose
CAMLprim value FUNCTION (stub, ctranspose) (value vX, value vY) {
struct caml_ba_array *X = Caml_ba_array_val(vX);
TYPE *X_data = (TYPE *) X->data;
struct caml_ba_array *Y = Caml_ba_array_val(vY);
TYPE *Y_data = (TYPE *) Y->data;
FUNCTION (c, ctranspose) (X_data, Y_data, X->dim[0], X->dim[1]);
return Val_unit;
}
#endif /* OWL_ENABLE_TEMPLATE */
|
GB_unop__log2_fp32_fp32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__log2_fp32_fp32
// op(A') function: GB_unop_tran__log2_fp32_fp32
// C type: float
// A type: float
// cast: float cij = aij
// unaryop: cij = log2f (aij)
#define GB_ATYPE \
float
#define GB_CTYPE \
float
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
float aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = log2f (x) ;
// casting
#define GB_CAST(z, aij) \
float z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
float aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
float z = aij ; \
Cx [pC] = log2f (z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LOG2 || GxB_NO_FP32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop_apply__log2_fp32_fp32
(
float *Cx, // Cx and Ax may be aliased
const float *Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
float aij = Ax [p] ;
float z = aij ;
Cx [p] = log2f (z) ;
}
return (GrB_SUCCESS) ;
#endif
}
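// Hedged usage sketch (hypothetical values, not in the original file):
//   float A [4] = {1.0f, 2.0f, 4.0f, 8.0f}, C [4] ;
//   GB_unop_apply__log2_fp32_fp32 (C, A, 4, 1) ;   // C = {0, 1, 2, 3}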
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop_tran__log2_fp32_fp32
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
serialized.c | // RUN: %libomp-compile-and-run | FileCheck %s
// REQUIRES: ompt
// UNSUPPORTED: gcc-4, gcc-5, gcc-6, gcc-7, gcc-8
#include "callback.h"
int main()
{
// print_frame(0);
#pragma omp parallel num_threads(1)
{
// print_frame(1);
print_ids(0);
print_ids(1);
// print_frame(0);
#pragma omp parallel num_threads(1)
{
// print_frame(1);
print_ids(0);
print_ids(1);
print_ids(2);
// print_frame(0);
#pragma omp task
{
// print_frame(1);
print_ids(0);
print_ids(1);
print_ids(2);
print_ids(3);
}
}
print_fuzzy_address(1);
}
print_fuzzy_address(2);
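// Hedged note (comment added for clarity, not part of the original test):
// num_threads(1) regions are serialized -- they execute on the encountering
// thread alone -- yet OMPT still requires parallel_begin/implicit_task_begin
// events for them, which is what the CHECK lines below verify.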
// Check if libomp supports the callbacks for this test.
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_parallel_begin'
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_parallel_end'
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_event_implicit_task_begin'
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_event_implicit_task_end'
// CHECK: 0: NULL_POINTER=[[NULL:.*$]]
// make sure initial data pointers are null
// CHECK-NOT: 0: parallel_data initially not null
// CHECK-NOT: 0: task_data initially not null
// CHECK-NOT: 0: thread_data initially not null
// CHECK: {{^}}[[MASTER_ID:[0-9]+]]: ompt_event_parallel_begin: parent_task_id=[[PARENT_TASK_ID:[0-9]+]], parent_task_frame.exit=[[NULL]], parent_task_frame.reenter={{0x[0-f]+}}, parallel_id=[[PARALLEL_ID:[0-9]+]], requested_team_size=1, codeptr_ra=[[OUTER_RETURN_ADDRESS:0x[0-f]+]]{{[0-f][0-f]}}, invoker=[[PARALLEL_INVOKER:[0-9]+]]
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID:[0-9]+]]
// CHECK: {{^}}[[MASTER_ID]]: task level 0: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]], exit_frame={{0x[0-f]+}}, reenter_frame=[[NULL]]
// CHECK: {{^}}[[MASTER_ID]]: task level 1: parallel_id=[[IMPLICIT_PARALLEL_ID:[0-9]+]], task_id=[[PARENT_TASK_ID]], exit_frame=[[NULL]], reenter_frame={{0x[0-f]+}}
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_parallel_begin: parent_task_id=[[IMPLICIT_TASK_ID]], parent_task_frame.exit={{0x[0-f]+}}, parent_task_frame.reenter={{0x[0-f]+}}, parallel_id=[[NESTED_PARALLEL_ID:[0-9]+]], requested_team_size=1, codeptr_ra=[[INNER_RETURN_ADDRESS:0x[0-f]+]]{{[0-f][0-f]}}, invoker=[[PARALLEL_INVOKER]]
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_begin: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[NESTED_IMPLICIT_TASK_ID:[0-9]+]]
// CHECK: {{^}}[[MASTER_ID]]: task level 0: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[NESTED_IMPLICIT_TASK_ID]], exit_frame={{0x[0-f]+}}, reenter_frame=[[NULL]]
// CHECK: {{^}}[[MASTER_ID]]: task level 1: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]], exit_frame={{0x[0-f]+}}, reenter_frame={{0x[0-f]+}}
// CHECK: {{^}}[[MASTER_ID]]: task level 2: parallel_id=[[IMPLICIT_PARALLEL_ID]], task_id=[[PARENT_TASK_ID]], exit_frame=[[NULL]], reenter_frame={{0x[0-f]+}}
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_task_create: parent_task_id=[[NESTED_IMPLICIT_TASK_ID]], parent_task_frame.exit={{0x[0-f]+}}, parent_task_frame.reenter={{0x[0-f]+}}, new_task_id=[[EXPLICIT_TASK_ID:[0-9]+]]
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_task_schedule: first_task_id=[[NESTED_IMPLICIT_TASK_ID]], second_task_id=[[EXPLICIT_TASK_ID]], prior_task_status=ompt_task_switch=7
// CHECK: {{^}}[[MASTER_ID]]: task level 0: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[EXPLICIT_TASK_ID]], exit_frame={{0x[0-f]+}}, reenter_frame=[[NULL]]
// CHECK: {{^}}[[MASTER_ID]]: task level 1: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[NESTED_IMPLICIT_TASK_ID]], exit_frame={{0x[0-f]+}}, reenter_frame={{0x[0-f]+}}
// CHECK: {{^}}[[MASTER_ID]]: task level 2: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]], exit_frame={{0x[0-f]+}}, reenter_frame={{0x[0-f]+}}
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_task_schedule: first_task_id=[[EXPLICIT_TASK_ID]], second_task_id=[[NESTED_IMPLICIT_TASK_ID]], prior_task_status=ompt_task_complete=1
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_task_end: task_id=[[EXPLICIT_TASK_ID]]
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_end: parallel_id=0, task_id=[[NESTED_IMPLICIT_TASK_ID]]
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_parallel_end: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]], invoker=[[PARALLEL_INVOKER]], codeptr_ra=[[INNER_RETURN_ADDRESS]]{{[0-f][0-f]}}
// CHECK: {{^}}[[MASTER_ID]]: fuzzy_address={{.*}}[[INNER_RETURN_ADDRESS]]
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_end: parallel_id=0, task_id=[[IMPLICIT_TASK_ID]]
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_parallel_end: parallel_id=[[PARALLEL_ID]], task_id=[[PARENT_TASK_ID]], invoker=[[PARALLEL_INVOKER]], codeptr_ra=[[OUTER_RETURN_ADDRESS]]{{[0-f][0-f]}}
// CHECK: {{^}}[[MASTER_ID]]: fuzzy_address={{.*}}[[OUTER_RETURN_ADDRESS]]
return 0;
}
|
residualbased_newton_raphson_contact_strategy.h | // KRATOS ______ __ __ _____ __ __ __
// / ____/___ ____ / /_____ ______/ /_/ ___// /________ _______/ /___ ___________ _/ /
// / / / __ \/ __ \/ __/ __ `/ ___/ __/\__ \/ __/ ___/ / / / ___/ __/ / / / ___/ __ `/ /
// / /___/ /_/ / / / / /_/ /_/ / /__/ /_ ___/ / /_/ / / /_/ / /__/ /_/ /_/ / / / /_/ / /
// \____/\____/_/ /_/\__/\__,_/\___/\__//____/\__/_/ \__,_/\___/\__/\__,_/_/ \__,_/_/ MECHANICS
//
// License: BSD License
// license: ContactStructuralMechanicsApplication/license.txt
//
// Main authors: Vicente Mataix Ferrandiz
//
#if !defined(KRATOS_RESIDUALBASED_NEWTON_RAPHSON_CONTACT_STRATEGY)
#define KRATOS_RESIDUALBASED_NEWTON_RAPHSON_CONTACT_STRATEGY
/* System Includes */
/* External Includes */
/* Project includes */
#include "contact_structural_mechanics_application_variables.h"
#include "includes/kratos_parameters.h"
#include "includes/define.h"
#include "includes/model_part.h"
#include "includes/variables.h"
// Strategies
#include "solving_strategies/strategies/residualbased_newton_raphson_strategy.h"
// Utilities
#include "utilities/variable_utils.h"
#include "utilities/color_utilities.h"
#include "utilities/math_utils.h"
#include "custom_python/process_factory_utility.h"
#include "custom_utilities/contact_utilities.h"
namespace Kratos {
///@name Kratos Globals
///@{
///@}
///@name Type Definitions
///@{
///@}
///@name Enum's
///@{
///@}
///@name Functions
///@{
///@}
///@name Kratos Classes
///@{
/**
* @class ResidualBasedNewtonRaphsonContactStrategy
* @ingroup ContactStructuralMechanicsApplication
* @brief Contact Newton Raphson class
* @details This class is a specialization of the Newton Raphson strategy with some custom modifications for contact problems
* @author Vicente Mataix Ferrandiz
*/
template<class TSparseSpace,
class TDenseSpace, // = DenseSpace<double>,
class TLinearSolver //= LinearSolver<TSparseSpace,TDenseSpace>
>
class ResidualBasedNewtonRaphsonContactStrategy :
public ResidualBasedNewtonRaphsonStrategy< TSparseSpace, TDenseSpace, TLinearSolver >
{
public:
///@name Type Definitions
///@{
/** Counted pointer of ClassName */
KRATOS_CLASS_POINTER_DEFINITION( ResidualBasedNewtonRaphsonContactStrategy );
typedef SolvingStrategy<TSparseSpace, TDenseSpace, TLinearSolver> StrategyBaseType;
typedef ResidualBasedNewtonRaphsonStrategy<TSparseSpace, TDenseSpace, TLinearSolver> BaseType;
typedef ResidualBasedNewtonRaphsonContactStrategy<TSparseSpace, TDenseSpace, TLinearSolver> ClassType;
typedef ConvergenceCriteria<TSparseSpace, TDenseSpace> TConvergenceCriteriaType;
typedef typename BaseType::TBuilderAndSolverType TBuilderAndSolverType;
typedef typename BaseType::TDataType TDataType;
typedef TSparseSpace SparseSpaceType;
typedef typename BaseType::TSchemeType TSchemeType;
typedef typename BaseType::DofsArrayType DofsArrayType;
typedef typename BaseType::TSystemMatrixType TSystemMatrixType;
typedef typename BaseType::TSystemVectorType TSystemVectorType;
typedef typename BaseType::LocalSystemVectorType LocalSystemVectorType;
typedef typename BaseType::LocalSystemMatrixType LocalSystemMatrixType;
typedef typename BaseType::TSystemMatrixPointerType TSystemMatrixPointerType;
typedef typename BaseType::TSystemVectorPointerType TSystemVectorPointerType;
typedef ModelPart::NodesContainerType NodesArrayType;
typedef ModelPart::ElementsContainerType ElementsArrayType;
typedef ModelPart::ConditionsContainerType ConditionsArrayType;
typedef ProcessFactoryUtility::Pointer ProcessesListType;
typedef std::size_t IndexType;
/**
* @brief Default constructor
*/
explicit ResidualBasedNewtonRaphsonContactStrategy()
{
}
/**
* @brief Default constructor. (with parameters)
* @param rModelPart The model part of the problem
* @param ThisParameters The configuration parameters
*/
explicit ResidualBasedNewtonRaphsonContactStrategy(ModelPart& rModelPart, Parameters ThisParameters)
: BaseType(rModelPart),
mpMyProcesses(nullptr),
mpPostProcesses(nullptr)
{
// Validate and assign defaults
ThisParameters = this->ValidateAndAssignParameters(ThisParameters, this->GetDefaultParameters());
this->AssignSettings(ThisParameters);
// Auxiliar assign
mConvergenceCriteriaEchoLevel = BaseType::mpConvergenceCriteria->GetEchoLevel();
}
/**
* @brief Default constructor
* @param rModelPart The model part of the problem
* @param pScheme The integration scheme
* @param pNewConvergenceCriteria The convergence criteria employed
* @param MaxIterations The maximum number of iterations
* @param CalculateReactions The flag for the reaction calculation
* @param ReformDofSetAtEachStep The flag that allows to compute the modification of the DOF
* @param MoveMeshFlag The flag that allows to move the mesh
*/
ResidualBasedNewtonRaphsonContactStrategy(
ModelPart& rModelPart,
typename TSchemeType::Pointer pScheme,
typename TConvergenceCriteriaType::Pointer pNewConvergenceCriteria,
typename TBuilderAndSolverType::Pointer pNewBuilderAndSolver,
IndexType MaxIterations = 30,
bool CalculateReactions = false,
bool ReformDofSetAtEachStep = false,
bool MoveMeshFlag = false,
Parameters ThisParameters = Parameters(R"({})"),
ProcessesListType pMyProcesses = nullptr,
ProcessesListType pPostProcesses = nullptr
)
: BaseType(rModelPart, pScheme, pNewConvergenceCriteria, pNewBuilderAndSolver, MaxIterations, CalculateReactions, ReformDofSetAtEachStep, MoveMeshFlag ),
mThisParameters(ThisParameters),
mpMyProcesses(pMyProcesses),
mpPostProcesses(pPostProcesses)
{
KRATOS_TRY;
mConvergenceCriteriaEchoLevel = pNewConvergenceCriteria->GetEchoLevel();
Parameters default_parameters = GetDefaultParameters();
mThisParameters.ValidateAndAssignDefaults(default_parameters);
KRATOS_CATCH("");
}
/**
* @brief Default constructor
* @param rModelPart The model part of the problem
* @param pScheme The integration scheme
* @param pNewLinearSolver The linear solver employed
* @param pNewConvergenceCriteria The convergence criteria employed
* @param MaxIterations The maximum number of iterations
* @param CalculateReactions The flag for the reaction calculation
* @param ReformDofSetAtEachStep The flag that allows to compute the modification of the DOF
* @param MoveMeshFlag The flag that allows to move the mesh
*/
ResidualBasedNewtonRaphsonContactStrategy(
ModelPart& rModelPart,
typename TSchemeType::Pointer pScheme,
typename TLinearSolver::Pointer pNewLinearSolver,
typename TConvergenceCriteriaType::Pointer pNewConvergenceCriteria,
IndexType MaxIterations = 30,
bool CalculateReactions = false,
bool ReformDofSetAtEachStep = false,
bool MoveMeshFlag = false,
Parameters ThisParameters = Parameters(R"({})"),
ProcessesListType pMyProcesses = nullptr,
ProcessesListType pPostProcesses = nullptr
)
: BaseType(rModelPart, pScheme, pNewLinearSolver, pNewConvergenceCriteria, MaxIterations, CalculateReactions, ReformDofSetAtEachStep, MoveMeshFlag),
mThisParameters(ThisParameters),
mpMyProcesses(pMyProcesses),
mpPostProcesses(pPostProcesses)
{
KRATOS_TRY;
mConvergenceCriteriaEchoLevel = pNewConvergenceCriteria->GetEchoLevel();
Parameters default_parameters = GetDefaultParameters();
mThisParameters.ValidateAndAssignDefaults(default_parameters);
KRATOS_CATCH("");
}
/**
* @brief Default constructor
* @param rModelPart The model part of the problem
* @param pScheme The integration scheme
* @param pNewLinearSolver The linear solver employed
* @param pNewConvergenceCriteria The convergence criteria employed
* @param MaxIterations The maximum number of iterations
* @param CalculateReactions The flag for the reaction calculation
* @param ReformDofSetAtEachStep The flag that allows to compute the modification of the DOF
* @param MoveMeshFlag The flag that allows to move the mesh
*/
ResidualBasedNewtonRaphsonContactStrategy(
ModelPart& rModelPart,
typename TSchemeType::Pointer pScheme,
typename TLinearSolver::Pointer pNewLinearSolver,
typename TConvergenceCriteriaType::Pointer pNewConvergenceCriteria,
typename TBuilderAndSolverType::Pointer pNewBuilderAndSolver,
IndexType MaxIterations = 30,
bool CalculateReactions = false,
bool ReformDofSetAtEachStep = false,
bool MoveMeshFlag = false,
Parameters ThisParameters = Parameters(R"({})"),
ProcessesListType pMyProcesses = nullptr,
ProcessesListType pPostProcesses = nullptr
)
: ResidualBasedNewtonRaphsonStrategy<TSparseSpace, TDenseSpace, TLinearSolver>(rModelPart, pScheme, pNewLinearSolver, pNewConvergenceCriteria, pNewBuilderAndSolver, MaxIterations, CalculateReactions, ReformDofSetAtEachStep, MoveMeshFlag ),
mThisParameters(ThisParameters),
mpMyProcesses(pMyProcesses),
mpPostProcesses(pPostProcesses)
{
KRATOS_TRY;
mConvergenceCriteriaEchoLevel = pNewConvergenceCriteria->GetEchoLevel();
Parameters default_parameters = GetDefaultParameters();
mThisParameters.ValidateAndAssignDefaults(default_parameters);
KRATOS_CATCH("");
}
/**
* Destructor.
*/
~ResidualBasedNewtonRaphsonContactStrategy() override
= default;
///@}
///@name Operators
///@{
///@}
///@name Operations
///@{
/**
* @brief Create method
* @param rModelPart The model part of the problem
* @param ThisParameters The configuration parameters
*/
typename StrategyBaseType::Pointer Create(
ModelPart& rModelPart,
Parameters ThisParameters
) const override
{
return Kratos::make_shared<ClassType>(rModelPart, ThisParameters);
}
/**
* @brief Operation to predict the solution; if it is not called, a trivial predictor is used in which the
* values of the solution step of interest are assumed to be equal to the old values
*/
void Predict() override
{
KRATOS_TRY
// Auxiliar zero array
const array_1d<double, 3> zero_array = ZeroVector(3);
// Set to zero the weighted gap
ModelPart& r_model_part = StrategyBaseType::GetModelPart();
NodesArrayType& nodes_array = r_model_part.GetSubModelPart("Contact").Nodes();
const bool frictional = r_model_part.Is(SLIP);
// We predict contact pressure in case of contact problem
if (nodes_array.begin()->SolutionStepsDataHas(WEIGHTED_GAP)) {
VariableUtils().SetVariable(WEIGHTED_GAP, 0.0, nodes_array);
if (frictional) {
VariableUtils().SetVariable(WEIGHTED_SLIP, zero_array, nodes_array);
}
// Compute the current gap
ContactUtilities::ComputeExplicitContributionConditions(r_model_part.GetSubModelPart("ComputingContact"));
// We predict a contact pressure
ProcessInfo& r_process_info = r_model_part.GetProcessInfo();
const std::size_t step = r_process_info[STEP];
if (step == 1) {
#pragma omp parallel for
for(int i = 0; i < static_cast<int>(nodes_array.size()); ++i) {
auto it_node = nodes_array.begin() + i;
noalias(it_node->Coordinates()) += it_node->FastGetSolutionStepValue(DISPLACEMENT);
}
} else {
#pragma omp parallel for
for(int i = 0; i < static_cast<int>(nodes_array.size()); ++i) {
auto it_node = nodes_array.begin() + i;
noalias(it_node->Coordinates()) += (it_node->FastGetSolutionStepValue(DISPLACEMENT) - it_node->FastGetSolutionStepValue(DISPLACEMENT, 1));
}
}
}
// BaseType::Predict(); // NOTE: May cause problems in dynamics!!!
//
// // Set to zero the weighted gap // NOTE: This can be done during the search if the predict is deactivated
// ModelPart& r_model_part = StrategyBaseType::GetModelPart();
// NodesArrayType& nodes_array = r_model_part.GetSubModelPart("Contact").Nodes();
//
// // We predict contact pressure in case of contact problem
// if (nodes_array.begin()->SolutionStepsDataHas(WEIGHTED_GAP)) {
// VariableUtils().SetVariable(WEIGHTED_GAP, 0.0, nodes_array);
//
// // Compute the current gap
// ContactUtilities::ComputeExplicitContributionConditions(r_model_part.GetSubModelPart("ComputingContact"));
//
// // We predict a contact pressure
// ProcessInfo& r_process_info = r_model_part.GetProcessInfo();
// const double initial_penalty_parameter = r_process_info[INITIAL_PENALTY];
//
// // We iterate over the nodes
// bool is_components = nodes_array.begin()->SolutionStepsDataHas(LAGRANGE_MULTIPLIER_CONTACT_PRESSURE) ? false : true;
//
// #pragma omp parallel for
// for(int i = 0; i < static_cast<int>(nodes_array.size()); ++i) {
// auto it_node = nodes_array.begin() + i;
//
// const double current_gap = it_node->FastGetSolutionStepValue(WEIGHTED_GAP);
//
// const double penalty = it_node->Has(INITIAL_PENALTY) ? it_node->GetValue(INITIAL_PENALTY) : initial_penalty_parameter;
//
// if (current_gap < 0.0) {
// it_node->Set(ACTIVE, true);
// if (is_components) {
// it_node->FastGetSolutionStepValue(LAGRANGE_MULTIPLIER_CONTACT_PRESSURE) = penalty * current_gap;
// } else {
// const array_1d<double, 3>& normal = it_node->FastGetSolutionStepValue(NORMAL);
// it_node->FastGetSolutionStepValue(VECTOR_LAGRANGE_MULTIPLIER) = penalty * current_gap * normal;
// }
// }
// }
// }
KRATOS_CATCH("")
}
/**
* @brief Initialization of member variables and prior operations
*/
void Initialize() override
{
KRATOS_TRY;
BaseType::Initialize();
mFinalizeWasPerformed = false;
// Initializing NL_ITERATION_NUMBER
ModelPart& r_model_part = StrategyBaseType::GetModelPart();
ProcessInfo& r_process_info = r_model_part.GetProcessInfo();
r_process_info[NL_ITERATION_NUMBER] = 1;
KRATOS_CATCH("");
}
/**
* @brief The problem of interest is solved.
* @details This function calls sequentially: Initialize(), InitializeSolutionStep(), Predict(),
* SolveSolutionStep() and FinalizeSolutionStep().
* All those functions can otherwise be called separately.
*/
double Solve() override
{
this->Initialize();
this->InitializeSolutionStep();
this->Predict();
this->SolveSolutionStep();
this->FinalizeSolutionStep();
// TODO: Add something if necessary
return 0.0;
}
/**
* @brief Performs all the required operations that should be done (for each step)
* before solving the solution step.
* @details A member variable should be used as a flag to make sure this function is called only once per step.
*/
void InitializeSolutionStep() override
{
BaseType::mpConvergenceCriteria->SetEchoLevel(0);
BaseType::InitializeSolutionStep();
BaseType::mpConvergenceCriteria->SetEchoLevel(mConvergenceCriteriaEchoLevel);
mFinalizeWasPerformed = false;
}
/**
* @brief Performs all the required operations that should be done (for each step)
* after solving the solution step.
*/
void FinalizeSolutionStep() override
{
KRATOS_TRY;
if (mFinalizeWasPerformed == false) {
BaseType::FinalizeSolutionStep();
// To avoid compute twice the FinalizeSolutionStep
mFinalizeWasPerformed = true;
}
KRATOS_CATCH("");
}
/**
* @brief Solves the current step.
* @details This function returns true if a solution has been found, false otherwise.
*/
bool SolveSolutionStep() override
{
KRATOS_TRY;
// bool is_converged = BaseType::SolveSolutionStep(); // FIXME: Requires to separate the non linear iterations
// bool is_converged = BaseSolveSolutionStep(); // Direct solution
bool is_converged = false;
// Getting model part
ModelPart& r_model_part = StrategyBaseType::GetModelPart();
if (r_model_part.IsNot(INTERACTION)) {
// We get the system
TSystemMatrixType& A = *BaseType::mpA;
TSystemVectorType& Dx = *BaseType::mpDx;
TSystemVectorType& b = *BaseType::mpb;
// We get the process info
ProcessInfo& r_process_info = r_model_part.GetProcessInfo();
int inner_iteration = 0;
while (!is_converged && inner_iteration < mThisParameters["inner_loop_iterations"].GetInt()) {
++inner_iteration;
if (mConvergenceCriteriaEchoLevel > 0 && StrategyBaseType::GetModelPart().GetCommunicator().MyPID() == 0 ) {
std::cout << std::endl << BOLDFONT("Simplified semi-smooth strategy. INNER ITERATION: ") << inner_iteration;
}
// We solve one loop
r_process_info[NL_ITERATION_NUMBER] = 1;
r_process_info[INNER_LOOP_ITERATION] = inner_iteration;
is_converged = BaseSolveSolutionStep();
// We check the convergence
BaseType::mpConvergenceCriteria->SetEchoLevel(0);
is_converged = BaseType::mpConvergenceCriteria->PostCriteria(r_model_part, BaseType::GetBuilderAndSolver()->GetDofSet(), A, Dx, b);
BaseType::mpConvergenceCriteria->SetEchoLevel(mConvergenceCriteriaEchoLevel);
if (mConvergenceCriteriaEchoLevel > 0 && StrategyBaseType::GetModelPart().GetCommunicator().MyPID() == 0 ) {
if (is_converged) std::cout << BOLDFONT("Simplified semi-smooth strategy. INNER ITERATION: ") << BOLDFONT(FGRN("CONVERGED")) << std::endl;
else std::cout << BOLDFONT("Simplified semi-smooth strategy. INNER ITERATION: ") << BOLDFONT(FRED("NOT CONVERGED")) << std::endl;
}
}
} else {
// We compute the base loop
r_model_part.GetProcessInfo()[INNER_LOOP_ITERATION] = 1;
is_converged = BaseSolveSolutionStep();
}
if (mThisParameters["adaptative_strategy"].GetBool()) {
if (!is_converged) {
is_converged = AdaptativeStep();
}
}
return is_converged;
KRATOS_CATCH("");
}
/**
* @brief This method returns the default parameters in order to avoid code duplication
* @return Returns the default parameters
*/
Parameters GetDefaultParameters() const override
{
Parameters default_parameters = Parameters(R"(
{
"name" : "newton_raphson_contact_strategy",
"adaptative_strategy" : false,
"split_factor" : 10.0,
"max_number_splits" : 3,
"inner_loop_iterations" : 5
})" );
// Getting base class default parameters
const Parameters base_default_parameters = BaseType::GetDefaultParameters();
default_parameters.RecursivelyAddMissingParameters(base_default_parameters);
return default_parameters;
}
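/**
* Hedged usage sketch (hypothetical snippet, not in the original header):
* the settings above can be fed to the parameters-based constructor, e.g.
*   Parameters settings(R"({"adaptative_strategy" : true,
*                           "inner_loop_iterations" : 10})");
*   ResidualBasedNewtonRaphsonContactStrategy<SparseSpace, DenseSpace,
*       LinearSolver> strategy(r_model_part, settings);
* Missing keys are filled from the defaults returned above.
*/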
/**
* @brief Returns the name of the class as used in the settings (snake_case format)
* @return The name of the class
*/
static std::string Name()
{
return "newton_raphson_contact_strategy";
}
///@}
///@name Access
///@{
///@}
///@name Inquiry
///@{
///@}
///@name Input and output
///@{
///@}
///@name Friends
///@{
protected:
///@name Protected static Member Variables
///@{
///@}
///@name Protected member Variables
///@{
Parameters mThisParameters; /// The configuration parameters
// ADAPTATIVE STRATEGY PARAMETERS
bool mFinalizeWasPerformed; /// If the FinalizeSolutionStep has already been performed
ProcessesListType mpMyProcesses; /// The processes list
ProcessesListType mpPostProcesses; /// The post processes list
// OTHER PARAMETERS
int mConvergenceCriteriaEchoLevel; /// The echo level of the convergence criteria
///@}
///@name Protected Operators
///@{
///@}
///@name Protected Operations
///@{
/**
* @brief This method assigns settings to member variables
* @param ThisParameters Parameters that are assigned to the member variables
*/
void AssignSettings(const Parameters ThisParameters) override
{
BaseType::AssignSettings(ThisParameters);
// Copy the parameters
mThisParameters = ThisParameters;
}
/**
* @brief Solves the current step.
* @details This function returns true if a solution has been found, false otherwise.
*/
bool BaseSolveSolutionStep()
{
KRATOS_TRY;
// Pointers needed in the solution
ModelPart& r_model_part = StrategyBaseType::GetModelPart();
ProcessInfo& r_process_info = r_model_part.GetProcessInfo();
typename TSchemeType::Pointer p_scheme = BaseType::GetScheme();
typename TBuilderAndSolverType::Pointer p_builder_and_solver = BaseType::GetBuilderAndSolver();
auto& r_dof_set = p_builder_and_solver->GetDofSet();
TSystemMatrixType& rA = *BaseType::mpA;
TSystemVectorType& rDx = *BaseType::mpDx;
TSystemVectorType& rb = *BaseType::mpb;
// Initializing the parameters of the Newton-Raphson cycle
IndexType iteration_number = 1;
r_process_info[NL_ITERATION_NUMBER] = iteration_number;
bool is_converged = false;
bool residual_is_updated = false;
p_scheme->InitializeNonLinIteration(r_model_part, rA, rDx, rb);
BaseType::mpConvergenceCriteria->InitializeNonLinearIteration(r_model_part, r_dof_set, rA, rDx, rb);
is_converged = BaseType::mpConvergenceCriteria->PreCriteria(r_model_part, r_dof_set, rA, rDx, rb);
// We do a geometry check before solve the system for first time
if (mThisParameters["adaptative_strategy"].GetBool()) {
if (CheckGeometryInverted()) {
KRATOS_WARNING("Element inverted") << "INVERTED ELEMENT BEFORE FIRST SOLVE" << std::endl;
r_process_info[STEP] -= 1; // We revert one step in case the geometry is already broken before the computation starts
return false;
}
}
// Function to perform the building and the solving phase.
if (StrategyBaseType::mRebuildLevel > 1 || StrategyBaseType::mStiffnessMatrixIsBuilt == false) {
TSparseSpace::SetToZero(rA);
TSparseSpace::SetToZero(rDx);
TSparseSpace::SetToZero(rb);
p_builder_and_solver->BuildAndSolve(p_scheme, r_model_part, rA, rDx, rb);
} else {
TSparseSpace::SetToZero(rDx); //Dx=0.00;
TSparseSpace::SetToZero(rb);
p_builder_and_solver->BuildRHSAndSolve(p_scheme, r_model_part, rA, rDx, rb);
}
// Debugging info
BaseType::EchoInfo(iteration_number);
// Updating the results stored in the database
UpdateDatabase(rA, rDx, rb, StrategyBaseType::MoveMeshFlag());
// We now check the geometry
if (mThisParameters["adaptative_strategy"].GetBool()) {
if (CheckGeometryInverted()) {
KRATOS_WARNING("Element inverted") << "INVERTED ELEMENT DURING DATABASE UPDATE" << std::endl;
r_process_info[STEP] -= 1; // We revert one step in case the geometry is already broken before the computation starts
return false;
}
}
p_scheme->FinalizeNonLinIteration(r_model_part, rA, rDx, rb);
BaseType::mpConvergenceCriteria->FinalizeNonLinearIteration(r_model_part, r_dof_set, rA, rDx, rb);
if (is_converged) {
// Initialisation of the convergence criteria
BaseType::mpConvergenceCriteria->InitializeSolutionStep(r_model_part, r_dof_set, rA, rDx, rb);
if (BaseType::mpConvergenceCriteria->GetActualizeRHSflag()) {
TSparseSpace::SetToZero(rb);
p_builder_and_solver->BuildRHS(p_scheme, r_model_part, rb);
}
is_converged = BaseType::mpConvergenceCriteria->PostCriteria(r_model_part, r_dof_set, rA, rDx, rb);
}
// Iteration cycle... performed only for NonLinearProblems
while (is_converged == false && iteration_number++<BaseType::mMaxIterationNumber) {
//setting the number of iteration
r_process_info[NL_ITERATION_NUMBER] = iteration_number;
p_scheme->InitializeNonLinIteration(r_model_part, rA, rDx, rb);
BaseType::mpConvergenceCriteria->InitializeNonLinearIteration(r_model_part, r_dof_set, rA, rDx, rb);
is_converged = BaseType::mpConvergenceCriteria->PreCriteria(r_model_part, r_dof_set, rA, rDx, rb);
// Call the linear system solver to find the correction rDx;
// it is not called if there is no system to solve
if (SparseSpaceType::Size(rDx) != 0) {
if (StrategyBaseType::mRebuildLevel > 1 || StrategyBaseType::mStiffnessMatrixIsBuilt == false ) {
if( BaseType::GetKeepSystemConstantDuringIterations() == false) {
//A = 0.00;
TSparseSpace::SetToZero(rA);
TSparseSpace::SetToZero(rDx);
TSparseSpace::SetToZero(rb);
p_builder_and_solver->BuildAndSolve(p_scheme, r_model_part, rA, rDx, rb);
}
else {
TSparseSpace::SetToZero(rDx);
TSparseSpace::SetToZero(rb);
p_builder_and_solver->BuildRHSAndSolve(p_scheme, r_model_part, rA, rDx, rb);
}
}
else {
TSparseSpace::SetToZero(rDx);
TSparseSpace::SetToZero(rb);
p_builder_and_solver->BuildRHSAndSolve(p_scheme, r_model_part, rA, rDx, rb);
}
} else {
KRATOS_WARNING("No DoFs") << "ATTENTION: no free DOFs!! " << std::endl;
}
// Debugging info
BaseType::EchoInfo(iteration_number);
// Updating the results stored in the database
UpdateDatabase(rA, rDx, rb, StrategyBaseType::MoveMeshFlag());
// We now check the geometry
if (mThisParameters["adaptative_strategy"].GetBool()) {
if (CheckGeometryInverted()) {
KRATOS_WARNING("Element inverted") << "INVERTED ELEMENT DURING DATABASE UPDATE" << std::endl;
r_process_info[STEP] -= 1; // We revert one step since the geometry was already broken before the computation started
return false;
}
}
p_scheme->FinalizeNonLinIteration(r_model_part, rA, rDx, rb);
BaseType::mpConvergenceCriteria->FinalizeNonLinearIteration(r_model_part, r_dof_set, rA, rDx, rb);
residual_is_updated = false;
if (is_converged) {
if (BaseType::mpConvergenceCriteria->GetActualizeRHSflag()) {
TSparseSpace::SetToZero(rb);
p_builder_and_solver->BuildRHS(p_scheme, r_model_part, rb);
residual_is_updated = true;
//std::cout << "mb is calculated" << std::endl;
}
is_converged = BaseType::mpConvergenceCriteria->PostCriteria(r_model_part, r_dof_set, rA, rDx, rb);
}
}
// Prints a warning if the maximum number of iterations is exceeded
if (iteration_number >= BaseType::mMaxIterationNumber && r_model_part.GetCommunicator().MyPID() == 0)
MaxIterationsExceeded();
// Recalculate residual if needed
// (note that some convergence criteria need it to be recalculated)
if (residual_is_updated == false) {
// NOTE:
// The following part is commented out because it is time consuming
// and there is no obvious reason for it to be here. If someone needs
// this part, please notify the community via the mailing list before
// uncommenting it.
// Pooyan.
// TSparseSpace::SetToZero(mb);
// p_builder_and_solver->BuildRHS(p_scheme, r_model_part, mb);
}
// Calculate reactions if required
if (BaseType::mCalculateReactionsFlag)
p_builder_and_solver->CalculateReactions(p_scheme, r_model_part, rA, rDx, rb);
return is_converged;
KRATOS_CATCH("");
}
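// Illustrative recap (not part of the original source): each pass of the
// loop above performs one Newton-Raphson update, solving the linear
// system A * Dx = b (rebuilding the LHS and/or RHS as the rebuild level
// requests) and applying Dx through UpdateDatabase(), until the
// convergence criteria accept the step or mMaxIterationNumber is reached.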
/**
* @brief This method performs the adaptive step, splitting the time step when convergence fails
*/
bool AdaptativeStep()
{
KRATOS_TRY;
bool is_converged = false;
// Warns if no processes to update the BCs or loads as a function of time have been set
if (mpMyProcesses == nullptr && StrategyBaseType::mEchoLevel > 0)
KRATOS_WARNING("No python processes") << "If you have not implemented any method to recalculate BC or loads in function of time, this strategy will be USELESS" << std::endl;
if (mpPostProcesses == nullptr && StrategyBaseType::mEchoLevel > 0)
KRATOS_WARNING("No python post processes") << "If you don't add the postprocesses and the time step if splitted you won't postprocess that steps" << std::endl;
ModelPart& r_model_part = StrategyBaseType::GetModelPart();
ProcessInfo& r_process_info = r_model_part.GetProcessInfo();
const double original_delta_time = r_process_info[DELTA_TIME]; // We save the delta time to restore later
int split_number = 0;
// We iterate until convergence is reached or we have split more times than allowed
while (is_converged == false && split_number <= mThisParameters["max_number_splits"].GetInt()) {
// Splitting the time step as a way to try to improve convergence
split_number += 1;
double aux_delta_time, current_time;
const double aux_time = SplitTimeStep(aux_delta_time, current_time);
current_time += aux_delta_time;
bool inside_the_split_is_converged = false;
IndexType inner_iteration = 0;
while (current_time <= aux_time) {
inner_iteration += 1;
r_process_info[STEP] += 1;
if (inner_iteration == 1) {
if (StrategyBaseType::MoveMeshFlag())
UnMoveMesh();
NodesArrayType& nodes_array = r_model_part.Nodes();
#pragma omp parallel for
for(int i = 0; i < static_cast<int>(nodes_array.size()); ++i) {
auto it_node = nodes_array.begin() + i;
it_node->OverwriteSolutionStepData(1, 0);
// it_node->OverwriteSolutionStepData(2, 1);
}
r_process_info.SetCurrentTime(current_time); // Reduces the time step
FinalizeSolutionStep();
} else {
NodesArrayType& nodes_array = r_model_part.Nodes();
#pragma omp parallel for
for(int i = 0; i < static_cast<int>(nodes_array.size()); ++i)
(nodes_array.begin() + i)->CloneSolutionStepData();
r_process_info.CloneSolutionStepInfo();
r_process_info.ClearHistory(r_model_part.GetBufferSize());
r_process_info.SetAsTimeStepInfo(current_time); // Sets the new time step
}
// We execute the processes before the non-linear iteration
if (mpMyProcesses != nullptr)
mpMyProcesses->ExecuteInitializeSolutionStep();
if (mpPostProcesses != nullptr)
mpPostProcesses->ExecuteInitializeSolutionStep();
// In order to initialize everything again
BaseType::mInitializeWasPerformed = false;
mFinalizeWasPerformed = false;
// We repeat the solve with the new DELTA_TIME
this->Initialize();
this->InitializeSolutionStep();
this->Predict();
inside_the_split_is_converged = BaseType::SolveSolutionStep();
this->FinalizeSolutionStep();
// We execute the processes after the non-linear iteration
if (mpMyProcesses != nullptr)
mpMyProcesses->ExecuteFinalizeSolutionStep();
if (mpPostProcesses != nullptr)
mpPostProcesses->ExecuteFinalizeSolutionStep();
if (mpMyProcesses != nullptr)
mpMyProcesses->ExecuteBeforeOutputStep();
if (mpPostProcesses != nullptr)
mpPostProcesses->PrintOutput();
if (mpMyProcesses != nullptr)
mpMyProcesses->ExecuteAfterOutputStep();
current_time += aux_delta_time;
}
if (inside_the_split_is_converged)
is_converged = true;
}
// Prints a warning if the maximum numbers of iterations and splits are exceeded
if (is_converged == false)
MaxIterationsAndSplitsExceeded();
// Restoring original DELTA_TIME
r_process_info[DELTA_TIME] = original_delta_time;
return is_converged;
KRATOS_CATCH("");
}
/**
* @brief Here the database is updated
* @param A The LHS matrix
* @param Dx The increment of solution after solving system
* @param b The RHS vector
* @param MoveMesh The flag that tells if the mesh should be moved
*/
void UpdateDatabase(
TSystemMatrixType& A,
TSystemVectorType& Dx,
TSystemVectorType& b,
const bool MoveMesh
) override
{
BaseType::UpdateDatabase(A,Dx,b,MoveMesh);
// TODO: Add something if necessary
}
/**
* @brief This method checks whether any element is inverted
*/
bool CheckGeometryInverted()
{
ModelPart& r_model_part = StrategyBaseType::GetModelPart();
ProcessInfo& r_process_info = r_model_part.GetProcessInfo();
bool inverted_element = false;
ElementsArrayType& elements_array = r_model_part.Elements();
// NOT OMP: the early returns below prevent straightforward parallelization
for(int i = 0; i < static_cast<int>(elements_array.size()); ++i) {
auto it_elem = elements_array.begin() + i;
auto& geom = it_elem->GetGeometry();
if (geom.DeterminantOfJacobian(0) < 0.0) {
if (mConvergenceCriteriaEchoLevel > 0) {
KRATOS_WATCH(it_elem->Id())
KRATOS_WATCH(geom.DeterminantOfJacobian(0))
}
return true;
}
// We check now the deformation gradient
std::vector<Matrix> deformation_gradient_matrices;
it_elem->CalculateOnIntegrationPoints( DEFORMATION_GRADIENT, deformation_gradient_matrices, r_process_info);
for (IndexType i_gp = 0; i_gp < deformation_gradient_matrices.size(); ++i_gp) {
const double det_f = MathUtils<double>::Det(deformation_gradient_matrices[i_gp]);
if (det_f < 0.0) {
if (mConvergenceCriteriaEchoLevel > 0) {
KRATOS_WATCH(it_elem->Id())
KRATOS_WATCH(det_f)
}
return true;
}
}
}
return inverted_element;
}
/**
* @brief Here the time step is split
* @param AuxDeltaTime The new delta time to be considered
* @param CurrentTime The current time
* @return The destination time
*/
double SplitTimeStep(
double& AuxDeltaTime,
double& CurrentTime
)
{
KRATOS_TRY;
const double aux_time = StrategyBaseType::GetModelPart().GetProcessInfo()[TIME];
AuxDeltaTime = StrategyBaseType::GetModelPart().GetProcessInfo()[DELTA_TIME];
CurrentTime = aux_time - AuxDeltaTime;
StrategyBaseType::GetModelPart().GetProcessInfo()[TIME] = CurrentTime; // Restore time to the previous one
AuxDeltaTime /= mThisParameters["split_factor"].GetDouble();
StrategyBaseType::GetModelPart().GetProcessInfo()[DELTA_TIME] = AuxDeltaTime; // Change delta time
CoutSplittingTime(AuxDeltaTime, aux_time);
return aux_time;
KRATOS_CATCH("");
}
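// Worked example (illustrative, not from the original source): with
// TIME = 1.0, DELTA_TIME = 0.1 and "split_factor" = 2.0, this method sets
// CurrentTime = 0.9, restores TIME to 0.9, halves the step so that
// AuxDeltaTime = 0.05, and returns aux_time = 1.0. The caller,
// AdaptativeStep(), then advances current_time in increments of 0.05
// (solving at 0.95 and 1.0) until the original time is reached again.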
/**
* This method moves the mesh back to the previous position
*/
void UnMoveMesh()
{
KRATOS_TRY;
if (StrategyBaseType::GetModelPart().NodesBegin()->SolutionStepsDataHas(DISPLACEMENT_X) == false)
KRATOS_ERROR << "It is impossible to move the mesh since the DISPLACEMENT var is not in the model_part. Either use SetMoveMeshFlag(False) or add DISPLACEMENT to the list of variables" << std::endl;
NodesArrayType& nodes_array = StrategyBaseType::GetModelPart().Nodes();
#pragma omp parallel for
for(int i = 0; i < static_cast<int>(nodes_array.size()); ++i) {
auto it_node = nodes_array.begin() + i;
noalias(it_node->Coordinates()) = it_node->GetInitialPosition().Coordinates();
noalias(it_node->Coordinates()) += it_node->FastGetSolutionStepValue(DISPLACEMENT, 1);
}
KRATOS_CATCH("");
}
/**
* @brief This method prints information after solving the problem
*/
void CoutSolvingProblem()
{
if (mConvergenceCriteriaEchoLevel != 0) {
std::cout << "STEP: " << StrategyBaseType::GetModelPart().GetProcessInfo()[STEP] << "\t NON LINEAR ITERATION: " << StrategyBaseType::GetModelPart().GetProcessInfo()[NL_ITERATION_NUMBER] << "\t TIME: " << StrategyBaseType::GetModelPart().GetProcessInfo()[TIME] << "\t DELTA TIME: " << StrategyBaseType::GetModelPart().GetProcessInfo()[DELTA_TIME] << std::endl;
}
}
/**
* @brief This method prints information after splitting the time increment
* @param AuxDeltaTime The new time step to be considered
* @param AuxTime The destination time
*/
void CoutSplittingTime(
const double AuxDeltaTime,
const double AuxTime
)
{
if (mConvergenceCriteriaEchoLevel > 0 && StrategyBaseType::GetModelPart().GetCommunicator().MyPID() == 0 ) {
const double Time = StrategyBaseType::GetModelPart().GetProcessInfo()[TIME];
std::cout.precision(4);
std::cout << "|----------------------------------------------------|" << std::endl;
std::cout << "| " << BOLDFONT("SPLITTING TIME STEP") << " |" << std::endl;
std::cout << "| " << BOLDFONT("COMING BACK TO TIME: ") << std::scientific << Time << " |" << std::endl;
std::cout << "| " << BOLDFONT(" NEW TIME STEP: ") << std::scientific << AuxDeltaTime << " |" << std::endl;
std::cout << "| " << BOLDFONT(" UNTIL TIME: ") << std::scientific << AuxTime << " |" << std::endl;
std::cout << "|----------------------------------------------------|" << std::endl;
}
}
/**
* @brief This method prints information after reaching the maximum number of iterations
*/
void MaxIterationsExceeded() override
{
if (mConvergenceCriteriaEchoLevel > 0 && StrategyBaseType::GetModelPart().GetCommunicator().MyPID() == 0 ) {
std::cout << "|----------------------------------------------------|" << std::endl;
std::cout << "| " << BOLDFONT(FRED("ATTENTION: Max iterations exceeded")) << " |" << std::endl;
std::cout << "|----------------------------------------------------|" << std::endl;
}
}
/**
* @brief This method prints information after reaching the maximum number of iterations and splits
*/
void MaxIterationsAndSplitsExceeded()
{
if (mConvergenceCriteriaEchoLevel > 0 && StrategyBaseType::GetModelPart().GetCommunicator().MyPID() == 0 ) {
std::cout << "|----------------------------------------------------|" << std::endl;
std::cout << "| " << BOLDFONT(FRED("ATTENTION: Max iterations exceeded")) << " |" << std::endl;
std::cout << "| " << BOLDFONT(FRED(" Max number of splits exceeded ")) << " |" << std::endl;
std::cout << "|----------------------------------------------------|" << std::endl;
}
}
///@}
///@name Protected Access
///@{
///@}
///@name Protected Inquiry
///@{
///@}
///@name Protected LifeCycle
///@{
/**
* Copy constructor.
*/
ResidualBasedNewtonRaphsonContactStrategy(const ResidualBasedNewtonRaphsonContactStrategy& Other)
{
}
private:
///@name Static Member Variables
///@{
///@}
///@name Member Variables
///@{
///@}
///@name Private Operators
///@{
///@}
///@name Private Operations
///@{
///@}
///@name Private Access
///@{
///@}
///@name Serialization
///@{
///@}
///@name Private Inquiry
///@{
///@}
///@name Un accessible methods
///@{
///@}
}; /* Class ResidualBasedNewtonRaphsonContactStrategy */
///@}
///@name Type Definitions
///@{
///@}
///@name Input and output
///@{
///@}
} // namespace Kratos
#endif /* KRATOS_RESIDUALBASED_NEWTON_RAPHSON_CONTACT_STRATEGY */
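// Minimal usage sketch (hypothetical driver; template arguments and setup
// are omitted, and the instance is assumed to be fully configured):
//
//   ResidualBasedNewtonRaphsonContactStrategy<...> strategy(/* ... */);
//   strategy.Initialize();
//   strategy.InitializeSolutionStep();
//   strategy.Predict();
//   const bool converged = strategy.SolveSolutionStep();
//   strategy.FinalizeSolutionStep();
//
// This mirrors the call sequence the strategy itself replays for each
// sub-step inside AdaptativeStep().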
|
Stmt.h | //===- Stmt.h - Classes for representing statements -------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the Stmt interface and subclasses.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_AST_STMT_H
#define LLVM_CLANG_AST_STMT_H
#include "clang/AST/DeclGroup.h"
#include "clang/AST/DependenceFlags.h"
#include "clang/AST/StmtIterator.h"
#include "clang/Basic/CapturedStmt.h"
#include "clang/Basic/IdentifierTable.h"
#include "clang/Basic/LLVM.h"
#include "clang/Basic/SourceLocation.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/BitmaskEnum.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/iterator.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <iterator>
#include <string>
namespace llvm {
class FoldingSetNodeID;
} // namespace llvm
namespace clang {
class ASTContext;
class Attr;
class CapturedDecl;
class Decl;
class Expr;
class AddrLabelExpr;
class LabelDecl;
class ODRHash;
class PrinterHelper;
struct PrintingPolicy;
class RecordDecl;
class SourceManager;
class StringLiteral;
class Token;
class VarDecl;
//===----------------------------------------------------------------------===//
// AST classes for statements.
//===----------------------------------------------------------------------===//
/// Stmt - This represents one statement.
///
class alignas(void *) Stmt {
public:
enum StmtClass {
NoStmtClass = 0,
#define STMT(CLASS, PARENT) CLASS##Class,
#define STMT_RANGE(BASE, FIRST, LAST) \
first##BASE##Constant=FIRST##Class, last##BASE##Constant=LAST##Class,
#define LAST_STMT_RANGE(BASE, FIRST, LAST) \
first##BASE##Constant=FIRST##Class, last##BASE##Constant=LAST##Class
#define ABSTRACT_STMT(STMT)
#include "clang/AST/StmtNodes.inc"
};
// Make vanilla 'new' and 'delete' illegal for Stmts.
protected:
friend class ASTStmtReader;
friend class ASTStmtWriter;
void *operator new(size_t bytes) noexcept {
llvm_unreachable("Stmts cannot be allocated with regular 'new'.");
}
void operator delete(void *data) noexcept {
llvm_unreachable("Stmts cannot be released with regular 'delete'.");
}
//===--- Statement bitfields classes ---===//
class StmtBitfields {
friend class ASTStmtReader;
friend class ASTStmtWriter;
friend class Stmt;
/// The statement class.
unsigned sClass : 8;
};
enum { NumStmtBits = 8 };
class NullStmtBitfields {
friend class ASTStmtReader;
friend class ASTStmtWriter;
friend class NullStmt;
unsigned : NumStmtBits;
/// True if the null statement was preceded by an empty macro, e.g.:
/// @code
/// #define CALL(x)
/// CALL(0);
/// @endcode
unsigned HasLeadingEmptyMacro : 1;
/// The location of the semi-colon.
SourceLocation SemiLoc;
};
class CompoundStmtBitfields {
friend class ASTStmtReader;
friend class CompoundStmt;
unsigned : NumStmtBits;
unsigned NumStmts : 32 - NumStmtBits;
/// The location of the opening "{".
SourceLocation LBraceLoc;
};
class LabelStmtBitfields {
friend class LabelStmt;
unsigned : NumStmtBits;
SourceLocation IdentLoc;
};
class AttributedStmtBitfields {
friend class ASTStmtReader;
friend class AttributedStmt;
unsigned : NumStmtBits;
/// Number of attributes.
unsigned NumAttrs : 32 - NumStmtBits;
/// The location of the attribute.
SourceLocation AttrLoc;
};
class IfStmtBitfields {
friend class ASTStmtReader;
friend class IfStmt;
unsigned : NumStmtBits;
/// True if this if statement is a constexpr if.
unsigned IsConstexpr : 1;
/// True if this if statement has storage for an else statement.
unsigned HasElse : 1;
/// True if this if statement has storage for a variable declaration.
unsigned HasVar : 1;
/// True if this if statement has storage for an init statement.
unsigned HasInit : 1;
/// The location of the "if".
SourceLocation IfLoc;
};
class SwitchStmtBitfields {
friend class SwitchStmt;
unsigned : NumStmtBits;
/// True if the SwitchStmt has storage for an init statement.
unsigned HasInit : 1;
/// True if the SwitchStmt has storage for a condition variable.
unsigned HasVar : 1;
/// If the SwitchStmt is a switch on an enum value, records whether all
/// the enum values were covered by CaseStmts. The coverage information
/// value is meant to be a hint for possible clients.
unsigned AllEnumCasesCovered : 1;
/// The location of the "switch".
SourceLocation SwitchLoc;
};
class WhileStmtBitfields {
friend class ASTStmtReader;
friend class WhileStmt;
unsigned : NumStmtBits;
/// True if the WhileStmt has storage for a condition variable.
unsigned HasVar : 1;
/// The location of the "while".
SourceLocation WhileLoc;
};
class DoStmtBitfields {
friend class DoStmt;
unsigned : NumStmtBits;
/// The location of the "do".
SourceLocation DoLoc;
};
class ForStmtBitfields {
friend class ForStmt;
unsigned : NumStmtBits;
/// The location of the "for".
SourceLocation ForLoc;
};
class GotoStmtBitfields {
friend class GotoStmt;
friend class IndirectGotoStmt;
unsigned : NumStmtBits;
/// The location of the "goto".
SourceLocation GotoLoc;
};
class ContinueStmtBitfields {
friend class ContinueStmt;
unsigned : NumStmtBits;
/// The location of the "continue".
SourceLocation ContinueLoc;
};
class BreakStmtBitfields {
friend class BreakStmt;
unsigned : NumStmtBits;
/// The location of the "break".
SourceLocation BreakLoc;
};
class ReturnStmtBitfields {
friend class ReturnStmt;
unsigned : NumStmtBits;
/// True if this ReturnStmt has storage for an NRVO candidate.
unsigned HasNRVOCandidate : 1;
/// The location of the "return".
SourceLocation RetLoc;
};
class SwitchCaseBitfields {
friend class SwitchCase;
friend class CaseStmt;
unsigned : NumStmtBits;
/// Used by CaseStmt to store whether it is a case statement
/// of the form case LHS ... RHS (a GNU extension).
unsigned CaseStmtIsGNURange : 1;
/// The location of the "case" or "default" keyword.
SourceLocation KeywordLoc;
};
//===--- Expression bitfields classes ---===//
class ExprBitfields {
friend class ASTStmtReader; // deserialization
friend class AtomicExpr; // ctor
friend class BlockDeclRefExpr; // ctor
friend class CallExpr; // ctor
friend class CXXConstructExpr; // ctor
friend class CXXDependentScopeMemberExpr; // ctor
friend class CXXNewExpr; // ctor
friend class CXXUnresolvedConstructExpr; // ctor
friend class DeclRefExpr; // computeDependence
friend class DependentScopeDeclRefExpr; // ctor
friend class DesignatedInitExpr; // ctor
friend class Expr;
friend class InitListExpr; // ctor
friend class ObjCArrayLiteral; // ctor
friend class ObjCDictionaryLiteral; // ctor
friend class ObjCMessageExpr; // ctor
friend class OffsetOfExpr; // ctor
friend class OpaqueValueExpr; // ctor
friend class OverloadExpr; // ctor
friend class ParenListExpr; // ctor
friend class PseudoObjectExpr; // ctor
friend class ShuffleVectorExpr; // ctor
unsigned : NumStmtBits;
unsigned ValueKind : 2;
unsigned ObjectKind : 3;
unsigned /*ExprDependence*/ Dependent : llvm::BitWidth<ExprDependence>;
};
enum { NumExprBits = NumStmtBits + 5 + llvm::BitWidth<ExprDependence> };
class ConstantExprBitfields {
friend class ASTStmtReader;
friend class ASTStmtWriter;
friend class ConstantExpr;
unsigned : NumExprBits;
/// The kind of result that is tail-allocated.
unsigned ResultKind : 2;
/// The kind of Result as defined by APValue::Kind.
unsigned APValueKind : 4;
/// When ResultKind == RSK_Int64, true if the tail-allocated integer is
/// unsigned.
unsigned IsUnsigned : 1;
/// When ResultKind == RSK_Int64, the BitWidth of the tail-allocated
/// integer. 7 bits because it is the minimal number of bits to represent a
/// value from 0 to 64 (the size of the tail-allocated integer).
unsigned BitWidth : 7;
/// When ResultKind == RSK_APValue, true if the ASTContext will cleanup the
/// tail-allocated APValue.
unsigned HasCleanup : 1;
/// True if this ConstantExpr was created for immediate invocation.
unsigned IsImmediateInvocation : 1;
};
class PredefinedExprBitfields {
friend class ASTStmtReader;
friend class PredefinedExpr;
unsigned : NumExprBits;
/// The kind of this PredefinedExpr. One of the enumeration values
/// in PredefinedExpr::IdentKind.
unsigned Kind : 4;
/// True if this PredefinedExpr has a trailing "StringLiteral *"
/// for the predefined identifier.
unsigned HasFunctionName : 1;
/// The location of this PredefinedExpr.
SourceLocation Loc;
};
class DeclRefExprBitfields {
friend class ASTStmtReader; // deserialization
friend class DeclRefExpr;
unsigned : NumExprBits;
unsigned HasQualifier : 1;
unsigned HasTemplateKWAndArgsInfo : 1;
unsigned HasFoundDecl : 1;
unsigned HadMultipleCandidates : 1;
unsigned RefersToEnclosingVariableOrCapture : 1;
unsigned NonOdrUseReason : 2;
/// The location of the declaration name itself.
SourceLocation Loc;
};
class FloatingLiteralBitfields {
friend class FloatingLiteral;
unsigned : NumExprBits;
unsigned Semantics : 3; // Provides semantics for APFloat construction
unsigned IsExact : 1;
};
class StringLiteralBitfields {
friend class ASTStmtReader;
friend class StringLiteral;
unsigned : NumExprBits;
/// The kind of this string literal.
/// One of the enumeration values of StringLiteral::StringKind.
unsigned Kind : 3;
/// The width of a single character in bytes. Only values of 1, 2,
/// and 4 bytes are supported. StringLiteral::mapCharByteWidth maps
/// the target + string kind to the appropriate CharByteWidth.
unsigned CharByteWidth : 3;
unsigned IsPascal : 1;
/// The number of concatenated tokens this string is made of.
/// This is the number of trailing SourceLocations.
unsigned NumConcatenated;
};
class CharacterLiteralBitfields {
friend class CharacterLiteral;
unsigned : NumExprBits;
unsigned Kind : 3;
};
class UnaryOperatorBitfields {
friend class UnaryOperator;
unsigned : NumExprBits;
unsigned Opc : 5;
unsigned CanOverflow : 1;
/// This is only meaningful for operations on floating point
/// types when additional values need to be in trailing storage.
/// It is 0 otherwise.
unsigned HasFPFeatures : 1;
SourceLocation Loc;
};
class UnaryExprOrTypeTraitExprBitfields {
friend class UnaryExprOrTypeTraitExpr;
unsigned : NumExprBits;
unsigned Kind : 3;
unsigned IsType : 1; // true if operand is a type, false if an expression.
};
class ArrayOrMatrixSubscriptExprBitfields {
friend class ArraySubscriptExpr;
friend class MatrixSubscriptExpr;
unsigned : NumExprBits;
SourceLocation RBracketLoc;
};
class CallExprBitfields {
friend class CallExpr;
unsigned : NumExprBits;
unsigned NumPreArgs : 1;
/// True if the callee of the call expression was found using ADL.
unsigned UsesADL : 1;
/// True if the call expression has some floating-point features.
unsigned HasFPFeatures : 1;
/// Padding used to align OffsetToTrailingObjects to a byte multiple.
unsigned : 24 - 3 - NumExprBits;
/// The offset in bytes from the this pointer to the start of the
/// trailing objects belonging to CallExpr. Intentionally byte sized
/// for faster access.
unsigned OffsetToTrailingObjects : 8;
};
enum { NumCallExprBits = 32 };
class MemberExprBitfields {
friend class ASTStmtReader;
friend class MemberExpr;
unsigned : NumExprBits;
/// IsArrow - True if this is "X->F", false if this is "X.F".
unsigned IsArrow : 1;
/// True if this member expression used a nested-name-specifier to
/// refer to the member, e.g., "x->Base::f", or found its member via
/// a using declaration. When true, a MemberExprNameQualifier
/// structure is allocated immediately after the MemberExpr.
unsigned HasQualifierOrFoundDecl : 1;
/// True if this member expression specified a template keyword
/// and/or a template argument list explicitly, e.g., x->f<int>,
/// x->template f, x->template f<int>.
/// When true, an ASTTemplateKWAndArgsInfo structure and its
/// TemplateArguments (if any) are present.
unsigned HasTemplateKWAndArgsInfo : 1;
/// True if this member expression refers to a method that
/// was resolved from an overloaded set having size greater than 1.
unsigned HadMultipleCandidates : 1;
/// Value of type NonOdrUseReason indicating why this MemberExpr does
/// not constitute an odr-use of the named declaration. Meaningful only
/// when naming a static member.
unsigned NonOdrUseReason : 2;
/// This is the location of the -> or . in the expression.
SourceLocation OperatorLoc;
};
class CastExprBitfields {
friend class CastExpr;
friend class ImplicitCastExpr;
unsigned : NumExprBits;
unsigned Kind : 6;
unsigned PartOfExplicitCast : 1; // Only set for ImplicitCastExpr.
/// The number of CXXBaseSpecifiers in the cast. 14 bits would be enough
/// here. ([implimits] Direct and indirect base classes [16384]).
unsigned BasePathSize;
};
class BinaryOperatorBitfields {
friend class BinaryOperator;
unsigned : NumExprBits;
unsigned Opc : 6;
/// This is only meaningful for operations on floating point
/// types when additional values need to be in trailing storage.
/// It is 0 otherwise.
unsigned HasFPFeatures : 1;
SourceLocation OpLoc;
};
class InitListExprBitfields {
friend class InitListExpr;
unsigned : NumExprBits;
/// Whether this initializer list originally had a GNU array-range
/// designator in it. This is a temporary marker used by CodeGen.
unsigned HadArrayRangeDesignator : 1;
};
class ParenListExprBitfields {
friend class ASTStmtReader;
friend class ParenListExpr;
unsigned : NumExprBits;
/// The number of expressions in the paren list.
unsigned NumExprs;
};
class GenericSelectionExprBitfields {
friend class ASTStmtReader;
friend class GenericSelectionExpr;
unsigned : NumExprBits;
/// The location of the "_Generic".
SourceLocation GenericLoc;
};
class PseudoObjectExprBitfields {
friend class ASTStmtReader; // deserialization
friend class PseudoObjectExpr;
unsigned : NumExprBits;
// These don't need to be particularly wide, because they're
// strictly limited by the forms of expressions we permit.
unsigned NumSubExprs : 8;
unsigned ResultIndex : 32 - 8 - NumExprBits;
};
class SourceLocExprBitfields {
friend class ASTStmtReader;
friend class SourceLocExpr;
unsigned : NumExprBits;
/// The kind of source location builtin represented by the SourceLocExpr.
/// Ex. __builtin_LINE, __builtin_FUNCTION, etc.
unsigned Kind : 2;
};
class StmtExprBitfields {
friend class ASTStmtReader;
friend class StmtExpr;
unsigned : NumExprBits;
/// The number of levels of template parameters enclosing this statement
/// expression. Used to determine if a statement expression remains
/// dependent after instantiation.
unsigned TemplateDepth;
};
//===--- C++ Expression bitfields classes ---===//
class CXXOperatorCallExprBitfields {
friend class ASTStmtReader;
friend class CXXOperatorCallExpr;
unsigned : NumCallExprBits;
/// The kind of this overloaded operator. One of the enumerator
/// values of OverloadedOperatorKind.
unsigned OperatorKind : 6;
};
class CXXRewrittenBinaryOperatorBitfields {
friend class ASTStmtReader;
friend class CXXRewrittenBinaryOperator;
unsigned : NumCallExprBits;
unsigned IsReversed : 1;
};
class CXXBoolLiteralExprBitfields {
friend class CXXBoolLiteralExpr;
unsigned : NumExprBits;
/// The value of the boolean literal.
unsigned Value : 1;
/// The location of the boolean literal.
SourceLocation Loc;
};
class CXXNullPtrLiteralExprBitfields {
friend class CXXNullPtrLiteralExpr;
unsigned : NumExprBits;
/// The location of the null pointer literal.
SourceLocation Loc;
};
class CXXThisExprBitfields {
friend class CXXThisExpr;
unsigned : NumExprBits;
/// Whether this is an implicit "this".
unsigned IsImplicit : 1;
/// The location of the "this".
SourceLocation Loc;
};
class CXXThrowExprBitfields {
friend class ASTStmtReader;
friend class CXXThrowExpr;
unsigned : NumExprBits;
/// Whether the thrown variable (if any) is in scope.
unsigned IsThrownVariableInScope : 1;
/// The location of the "throw".
SourceLocation ThrowLoc;
};
class CXXDefaultArgExprBitfields {
friend class ASTStmtReader;
friend class CXXDefaultArgExpr;
unsigned : NumExprBits;
/// The location where the default argument expression was used.
SourceLocation Loc;
};
class CXXDefaultInitExprBitfields {
friend class ASTStmtReader;
friend class CXXDefaultInitExpr;
unsigned : NumExprBits;
/// The location where the default initializer expression was used.
SourceLocation Loc;
};
class CXXScalarValueInitExprBitfields {
friend class ASTStmtReader;
friend class CXXScalarValueInitExpr;
unsigned : NumExprBits;
SourceLocation RParenLoc;
};
class CXXNewExprBitfields {
friend class ASTStmtReader;
friend class ASTStmtWriter;
friend class CXXNewExpr;
unsigned : NumExprBits;
/// Was the usage ::new, i.e. is the global new to be used?
unsigned IsGlobalNew : 1;
/// Do we allocate an array? If so, the first trailing "Stmt *" is the
/// size expression.
unsigned IsArray : 1;
/// Should the alignment be passed to the allocation function?
unsigned ShouldPassAlignment : 1;
/// If this is an array allocation, does the usual deallocation
/// function for the allocated type want to know the allocated size?
unsigned UsualArrayDeleteWantsSize : 1;
/// What kind of initializer do we have? Could be none, parens, or braces.
/// In storage, we distinguish between "none, and no initializer expr", and
/// "none, but an implicit initializer expr".
unsigned StoredInitializationStyle : 2;
/// True if the allocated type was expressed as a parenthesized type-id.
unsigned IsParenTypeId : 1;
/// The number of placement new arguments.
unsigned NumPlacementArgs;
};
class CXXDeleteExprBitfields {
friend class ASTStmtReader;
friend class CXXDeleteExpr;
unsigned : NumExprBits;
/// Is this a forced global delete, i.e. "::delete"?
unsigned GlobalDelete : 1;
/// Is this the array form of delete, i.e. "delete[]"?
unsigned ArrayForm : 1;
/// ArrayFormAsWritten can be different from ArrayForm if 'delete' is
/// applied to pointer-to-array type (ArrayFormAsWritten will be false
/// while ArrayForm will be true).
unsigned ArrayFormAsWritten : 1;
/// Does the usual deallocation function for the element type require
/// a size_t argument?
unsigned UsualArrayDeleteWantsSize : 1;
/// Location of the expression.
SourceLocation Loc;
};
class TypeTraitExprBitfields {
friend class ASTStmtReader;
friend class ASTStmtWriter;
friend class TypeTraitExpr;
unsigned : NumExprBits;
/// The kind of type trait, which is a value of a TypeTrait enumerator.
unsigned Kind : 8;
/// If this expression is not value-dependent, this indicates whether
/// the trait evaluated true or false.
unsigned Value : 1;
/// The number of arguments to this type trait. According to [implimits]
/// 8 bits would be enough, but we require (and test for) at least 16 bits
/// to mirror FunctionType.
unsigned NumArgs;
};
class DependentScopeDeclRefExprBitfields {
friend class ASTStmtReader;
friend class ASTStmtWriter;
friend class DependentScopeDeclRefExpr;
unsigned : NumExprBits;
/// Whether the name includes info for explicit template
/// keyword and arguments.
unsigned HasTemplateKWAndArgsInfo : 1;
};
class CXXConstructExprBitfields {
friend class ASTStmtReader;
friend class CXXConstructExpr;
unsigned : NumExprBits;
unsigned Elidable : 1;
unsigned HadMultipleCandidates : 1;
unsigned ListInitialization : 1;
unsigned StdInitListInitialization : 1;
unsigned ZeroInitialization : 1;
unsigned ConstructionKind : 3;
SourceLocation Loc;
};
class ExprWithCleanupsBitfields {
friend class ASTStmtReader; // deserialization
friend class ExprWithCleanups;
unsigned : NumExprBits;
// When false, it must not have side effects.
unsigned CleanupsHaveSideEffects : 1;
unsigned NumObjects : 32 - 1 - NumExprBits;
};
class CXXUnresolvedConstructExprBitfields {
friend class ASTStmtReader;
friend class CXXUnresolvedConstructExpr;
unsigned : NumExprBits;
/// The number of arguments used to construct the type.
unsigned NumArgs;
};
class CXXDependentScopeMemberExprBitfields {
friend class ASTStmtReader;
friend class CXXDependentScopeMemberExpr;
unsigned : NumExprBits;
/// Whether this member expression used the '->' operator or
/// the '.' operator.
unsigned IsArrow : 1;
/// Whether this member expression has info for explicit template
/// keyword and arguments.
unsigned HasTemplateKWAndArgsInfo : 1;
/// See getFirstQualifierFoundInScope() and the comment listing
/// the trailing objects.
unsigned HasFirstQualifierFoundInScope : 1;
/// The location of the '->' or '.' operator.
SourceLocation OperatorLoc;
};
class OverloadExprBitfields {
friend class ASTStmtReader;
friend class OverloadExpr;
unsigned : NumExprBits;
/// Whether the name includes info for explicit template
/// keyword and arguments.
unsigned HasTemplateKWAndArgsInfo : 1;
/// Padding used by the derived classes to store various bits. If you
/// need to add some data here, shrink this padding and add your data
/// above. NumOverloadExprBits also needs to be updated.
unsigned : 32 - NumExprBits - 1;
/// The number of results.
unsigned NumResults;
};
enum { NumOverloadExprBits = NumExprBits + 1 };
class UnresolvedLookupExprBitfields {
friend class ASTStmtReader;
friend class UnresolvedLookupExpr;
unsigned : NumOverloadExprBits;
/// True if these lookup results should be extended by
/// argument-dependent lookup if this is the operand of a function call.
unsigned RequiresADL : 1;
/// True if these lookup results are overloaded. This is pretty trivially
/// rederivable if we urgently need to kill this field.
unsigned Overloaded : 1;
};
static_assert(sizeof(UnresolvedLookupExprBitfields) <= 4,
"UnresolvedLookupExprBitfields must be <= than 4 bytes to"
"avoid trashing OverloadExprBitfields::NumResults!");
class UnresolvedMemberExprBitfields {
friend class ASTStmtReader;
friend class UnresolvedMemberExpr;
unsigned : NumOverloadExprBits;
/// Whether this member expression used the '->' operator or
/// the '.' operator.
unsigned IsArrow : 1;
/// Whether the lookup results contain an unresolved using declaration.
unsigned HasUnresolvedUsing : 1;
};
static_assert(sizeof(UnresolvedMemberExprBitfields) <= 4,
"UnresolvedMemberExprBitfields must be <= than 4 bytes to"
"avoid trashing OverloadExprBitfields::NumResults!");
class CXXNoexceptExprBitfields {
friend class ASTStmtReader;
friend class CXXNoexceptExpr;
unsigned : NumExprBits;
unsigned Value : 1;
};
class SubstNonTypeTemplateParmExprBitfields {
friend class ASTStmtReader;
friend class SubstNonTypeTemplateParmExpr;
unsigned : NumExprBits;
/// The location of the non-type template parameter reference.
SourceLocation NameLoc;
};
class LambdaExprBitfields {
friend class ASTStmtReader;
friend class ASTStmtWriter;
friend class LambdaExpr;
unsigned : NumExprBits;
/// The default capture kind, which is a value of type
/// LambdaCaptureDefault.
unsigned CaptureDefault : 2;
/// Whether this lambda had an explicit parameter list vs. an
/// implicit (and empty) parameter list.
unsigned ExplicitParams : 1;
/// Whether this lambda had the result type explicitly specified.
unsigned ExplicitResultType : 1;
/// The number of captures.
unsigned NumCaptures : 16;
};
class RequiresExprBitfields {
friend class ASTStmtReader;
friend class ASTStmtWriter;
friend class RequiresExpr;
unsigned : NumExprBits;
unsigned IsSatisfied : 1;
SourceLocation RequiresKWLoc;
};
//===--- C++ Coroutines TS bitfields classes ---===//
class CoawaitExprBitfields {
friend class CoawaitExpr;
unsigned : NumExprBits;
unsigned IsImplicit : 1;
};
//===--- Obj-C Expression bitfields classes ---===//
class ObjCIndirectCopyRestoreExprBitfields {
friend class ObjCIndirectCopyRestoreExpr;
unsigned : NumExprBits;
unsigned ShouldCopy : 1;
};
//===--- Clang Extensions bitfields classes ---===//
class OpaqueValueExprBitfields {
friend class ASTStmtReader;
friend class OpaqueValueExpr;
unsigned : NumExprBits;
/// The OVE is a unique semantic reference to its source expression if this
/// bit is set to true.
unsigned IsUnique : 1;
SourceLocation Loc;
};
union {
// Same order as in StmtNodes.td.
// Statements
StmtBitfields StmtBits;
NullStmtBitfields NullStmtBits;
CompoundStmtBitfields CompoundStmtBits;
LabelStmtBitfields LabelStmtBits;
AttributedStmtBitfields AttributedStmtBits;
IfStmtBitfields IfStmtBits;
SwitchStmtBitfields SwitchStmtBits;
WhileStmtBitfields WhileStmtBits;
DoStmtBitfields DoStmtBits;
ForStmtBitfields ForStmtBits;
GotoStmtBitfields GotoStmtBits;
ContinueStmtBitfields ContinueStmtBits;
BreakStmtBitfields BreakStmtBits;
ReturnStmtBitfields ReturnStmtBits;
SwitchCaseBitfields SwitchCaseBits;
// Expressions
ExprBitfields ExprBits;
ConstantExprBitfields ConstantExprBits;
PredefinedExprBitfields PredefinedExprBits;
DeclRefExprBitfields DeclRefExprBits;
FloatingLiteralBitfields FloatingLiteralBits;
StringLiteralBitfields StringLiteralBits;
CharacterLiteralBitfields CharacterLiteralBits;
UnaryOperatorBitfields UnaryOperatorBits;
UnaryExprOrTypeTraitExprBitfields UnaryExprOrTypeTraitExprBits;
ArrayOrMatrixSubscriptExprBitfields ArrayOrMatrixSubscriptExprBits;
CallExprBitfields CallExprBits;
MemberExprBitfields MemberExprBits;
CastExprBitfields CastExprBits;
BinaryOperatorBitfields BinaryOperatorBits;
InitListExprBitfields InitListExprBits;
ParenListExprBitfields ParenListExprBits;
GenericSelectionExprBitfields GenericSelectionExprBits;
PseudoObjectExprBitfields PseudoObjectExprBits;
SourceLocExprBitfields SourceLocExprBits;
// GNU Extensions.
StmtExprBitfields StmtExprBits;
// C++ Expressions
CXXOperatorCallExprBitfields CXXOperatorCallExprBits;
CXXRewrittenBinaryOperatorBitfields CXXRewrittenBinaryOperatorBits;
CXXBoolLiteralExprBitfields CXXBoolLiteralExprBits;
CXXNullPtrLiteralExprBitfields CXXNullPtrLiteralExprBits;
CXXThisExprBitfields CXXThisExprBits;
CXXThrowExprBitfields CXXThrowExprBits;
CXXDefaultArgExprBitfields CXXDefaultArgExprBits;
CXXDefaultInitExprBitfields CXXDefaultInitExprBits;
CXXScalarValueInitExprBitfields CXXScalarValueInitExprBits;
CXXNewExprBitfields CXXNewExprBits;
CXXDeleteExprBitfields CXXDeleteExprBits;
TypeTraitExprBitfields TypeTraitExprBits;
DependentScopeDeclRefExprBitfields DependentScopeDeclRefExprBits;
CXXConstructExprBitfields CXXConstructExprBits;
ExprWithCleanupsBitfields ExprWithCleanupsBits;
CXXUnresolvedConstructExprBitfields CXXUnresolvedConstructExprBits;
CXXDependentScopeMemberExprBitfields CXXDependentScopeMemberExprBits;
OverloadExprBitfields OverloadExprBits;
UnresolvedLookupExprBitfields UnresolvedLookupExprBits;
UnresolvedMemberExprBitfields UnresolvedMemberExprBits;
CXXNoexceptExprBitfields CXXNoexceptExprBits;
SubstNonTypeTemplateParmExprBitfields SubstNonTypeTemplateParmExprBits;
LambdaExprBitfields LambdaExprBits;
RequiresExprBitfields RequiresExprBits;
// C++ Coroutines TS expressions
CoawaitExprBitfields CoawaitBits;
// Obj-C Expressions
ObjCIndirectCopyRestoreExprBitfields ObjCIndirectCopyRestoreExprBits;
// Clang Extensions
OpaqueValueExprBitfields OpaqueValueExprBits;
};
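// Illustrative note (not from the original header): every bitfield class
// above begins with anonymous padding ("unsigned : NumStmtBits",
// "unsigned : NumExprBits", ...), so all union members overlay the same
// leading bits. That is what lets getStmtClass() below read
// StmtBits.sClass no matter which member a concrete subclass writes.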
public:
// Only allow allocation of Stmts using the allocator in ASTContext
// or by doing a placement new.
void* operator new(size_t bytes, const ASTContext& C,
unsigned alignment = 8);
void* operator new(size_t bytes, const ASTContext* C,
unsigned alignment = 8) {
return operator new(bytes, *C, alignment);
}
void *operator new(size_t bytes, void *mem) noexcept { return mem; }
void operator delete(void *, const ASTContext &, unsigned) noexcept {}
void operator delete(void *, const ASTContext *, unsigned) noexcept {}
void operator delete(void *, size_t) noexcept {}
void operator delete(void *, void *) noexcept {}
public:
/// A placeholder type used to construct an empty shell of a
/// type, that will be filled in later (e.g., by some
/// de-serialization).
struct EmptyShell {};
protected:
/// Iterator for iterating over Stmt * arrays that contain only T *.
///
/// This is needed because AST nodes use Stmt* arrays to store
/// references to children (to be compatible with StmtIterator).
template<typename T, typename TPtr = T *, typename StmtPtr = Stmt *>
struct CastIterator
: llvm::iterator_adaptor_base<CastIterator<T, TPtr, StmtPtr>, StmtPtr *,
std::random_access_iterator_tag, TPtr> {
using Base = typename CastIterator::iterator_adaptor_base;
CastIterator() : Base(nullptr) {}
CastIterator(StmtPtr *I) : Base(I) {}
typename Base::value_type operator*() const {
return cast_or_null<T>(*this->I);
}
};
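// Usage sketch (illustrative only): given a "Stmt *" array that is known
// to hold expressions, ExprIterator below yields "Expr *" values, e.g.
//
//   Stmt *SubExprs[2] = {/* ... */};
//   ExprIterator I(SubExprs);
//   Expr *First = *I; // cast_or_null<Expr>(SubExprs[0])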
/// Const iterator for iterating over Stmt * arrays that contain only T *.
template <typename T>
using ConstCastIterator = CastIterator<T, const T *const, const Stmt *const>;
using ExprIterator = CastIterator<Expr>;
using ConstExprIterator = ConstCastIterator<Expr>;
private:
/// Whether statistic collection is enabled.
static bool StatisticsEnabled;
protected:
/// Construct an empty statement.
explicit Stmt(StmtClass SC, EmptyShell) : Stmt(SC) {}
public:
Stmt() = delete;
Stmt(const Stmt &) = delete;
Stmt(Stmt &&) = delete;
Stmt &operator=(const Stmt &) = delete;
Stmt &operator=(Stmt &&) = delete;
Stmt(StmtClass SC) {
static_assert(sizeof(*this) <= 8,
"changing bitfields changed sizeof(Stmt)");
static_assert(sizeof(*this) % alignof(void *) == 0,
"Insufficient alignment!");
StmtBits.sClass = SC;
if (StatisticsEnabled) Stmt::addStmtClass(SC);
}
StmtClass getStmtClass() const {
return static_cast<StmtClass>(StmtBits.sClass);
}
const char *getStmtClassName() const;
/// SourceLocation tokens are not useful in isolation - they are low level
/// value objects created/interpreted by SourceManager. We assume AST
/// clients will have a pointer to the respective SourceManager.
SourceRange getSourceRange() const LLVM_READONLY;
SourceLocation getBeginLoc() const LLVM_READONLY;
SourceLocation getEndLoc() const LLVM_READONLY;
// global temp stats (until we have a per-module visitor)
static void addStmtClass(const StmtClass s);
static void EnableStatistics();
static void PrintStats();
/// Dumps the specified AST fragment and all subtrees to
/// \c llvm::errs().
void dump() const;
void dump(raw_ostream &OS, const ASTContext &Context) const;
/// \return Unique reproducible object identifier
int64_t getID(const ASTContext &Context) const;
/// dumpColor - same as dump(), but forces color highlighting.
void dumpColor() const;
/// dumpPretty/printPretty - These two methods do a "pretty print" of the AST
/// back to its original source language syntax.
void dumpPretty(const ASTContext &Context) const;
void printPretty(raw_ostream &OS, PrinterHelper *Helper,
const PrintingPolicy &Policy, unsigned Indentation = 0,
StringRef NewlineSymbol = "\n",
const ASTContext *Context = nullptr) const;
/// Pretty-prints in JSON format.
void printJson(raw_ostream &Out, PrinterHelper *Helper,
const PrintingPolicy &Policy, bool AddQuotes) const;
/// viewAST - Visualize an AST rooted at this Stmt* using GraphViz. Only
/// works on systems with GraphViz (Mac OS X) or dot+gv installed.
void viewAST() const;
/// Skip no-op (attributed, compound) container stmts and skip captured
/// stmt at the top, if \a IgnoreCaptured is true.
Stmt *IgnoreContainers(bool IgnoreCaptured = false);
const Stmt *IgnoreContainers(bool IgnoreCaptured = false) const {
return const_cast<Stmt *>(this)->IgnoreContainers(IgnoreCaptured);
}
const Stmt *stripLabelLikeStatements() const;
Stmt *stripLabelLikeStatements() {
return const_cast<Stmt*>(
const_cast<const Stmt*>(this)->stripLabelLikeStatements());
}
/// Child Iterators: All subclasses must implement 'children'
/// to permit easy iteration over the substatements/subexpressions of an
/// AST node. This permits easy iteration over all nodes in the AST.
using child_iterator = StmtIterator;
using const_child_iterator = ConstStmtIterator;
using child_range = llvm::iterator_range<child_iterator>;
using const_child_range = llvm::iterator_range<const_child_iterator>;
child_range children();
const_child_range children() const {
auto Children = const_cast<Stmt *>(this)->children();
return const_child_range(Children.begin(), Children.end());
}
child_iterator child_begin() { return children().begin(); }
child_iterator child_end() { return children().end(); }
const_child_iterator child_begin() const { return children().begin(); }
const_child_iterator child_end() const { return children().end(); }
/// Produce a unique representation of the given statement.
///
/// \param ID once the profiling operation is complete, will contain
/// the unique representation of the given statement.
///
/// \param Context the AST context in which the statement resides
///
/// \param Canonical whether the profile should be based on the canonical
/// representation of this statement (e.g., where non-type template
/// parameters are identified by index/level rather than their
/// declaration pointers) or the exact representation of the statement as
/// written in the source.
void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context,
bool Canonical) const;
/// Calculate a unique representation for a statement that is
/// stable across compiler invocations.
///
/// \param ID profile information will be stored in ID.
///
/// \param Hash an ODRHash object which will be called where pointers would
/// have been used in the Profile function.
void ProcessODRHash(llvm::FoldingSetNodeID &ID, ODRHash& Hash) const;
};
/// DeclStmt - Adaptor class for mixing declarations with statements and
/// expressions. For example, CompoundStmt mixes statements, expressions
/// and declarations (variables, types). Another example is ForStmt, where
/// the first statement can be an expression or a declaration.
class DeclStmt : public Stmt {
DeclGroupRef DG;
SourceLocation StartLoc, EndLoc;
public:
DeclStmt(DeclGroupRef dg, SourceLocation startLoc, SourceLocation endLoc)
: Stmt(DeclStmtClass), DG(dg), StartLoc(startLoc), EndLoc(endLoc) {}
/// Build an empty declaration statement.
explicit DeclStmt(EmptyShell Empty) : Stmt(DeclStmtClass, Empty) {}
/// isSingleDecl - This method returns true if this DeclStmt refers
/// to a single Decl.
bool isSingleDecl() const { return DG.isSingleDecl(); }
const Decl *getSingleDecl() const { return DG.getSingleDecl(); }
Decl *getSingleDecl() { return DG.getSingleDecl(); }
const DeclGroupRef getDeclGroup() const { return DG; }
DeclGroupRef getDeclGroup() { return DG; }
void setDeclGroup(DeclGroupRef DGR) { DG = DGR; }
void setStartLoc(SourceLocation L) { StartLoc = L; }
SourceLocation getEndLoc() const { return EndLoc; }
void setEndLoc(SourceLocation L) { EndLoc = L; }
SourceLocation getBeginLoc() const LLVM_READONLY { return StartLoc; }
static bool classof(const Stmt *T) {
return T->getStmtClass() == DeclStmtClass;
}
// Iterators over subexpressions.
child_range children() {
return child_range(child_iterator(DG.begin(), DG.end()),
child_iterator(DG.end(), DG.end()));
}
const_child_range children() const {
auto Children = const_cast<DeclStmt *>(this)->children();
return const_child_range(Children);
}
using decl_iterator = DeclGroupRef::iterator;
using const_decl_iterator = DeclGroupRef::const_iterator;
using decl_range = llvm::iterator_range<decl_iterator>;
using decl_const_range = llvm::iterator_range<const_decl_iterator>;
decl_range decls() { return decl_range(decl_begin(), decl_end()); }
decl_const_range decls() const {
return decl_const_range(decl_begin(), decl_end());
}
decl_iterator decl_begin() { return DG.begin(); }
decl_iterator decl_end() { return DG.end(); }
const_decl_iterator decl_begin() const { return DG.begin(); }
const_decl_iterator decl_end() const { return DG.end(); }
using reverse_decl_iterator = std::reverse_iterator<decl_iterator>;
reverse_decl_iterator decl_rbegin() {
return reverse_decl_iterator(decl_end());
}
reverse_decl_iterator decl_rend() {
return reverse_decl_iterator(decl_begin());
}
};
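// Usage sketch (illustrative; DS is an assumed "const DeclStmt *"): the
// decls() range walks the declaration group in source order, e.g.
//
//   for (const Decl *D : DS->decls())
//     handleDecl(D); // handleDecl() is a hypothetical callback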
/// NullStmt - This is the null statement ";": C99 6.8.3p3.
///
class NullStmt : public Stmt {
public:
NullStmt(SourceLocation L, bool hasLeadingEmptyMacro = false)
: Stmt(NullStmtClass) {
NullStmtBits.HasLeadingEmptyMacro = hasLeadingEmptyMacro;
setSemiLoc(L);
}
/// Build an empty null statement.
explicit NullStmt(EmptyShell Empty) : Stmt(NullStmtClass, Empty) {}
SourceLocation getSemiLoc() const { return NullStmtBits.SemiLoc; }
void setSemiLoc(SourceLocation L) { NullStmtBits.SemiLoc = L; }
bool hasLeadingEmptyMacro() const {
return NullStmtBits.HasLeadingEmptyMacro;
}
SourceLocation getBeginLoc() const { return getSemiLoc(); }
SourceLocation getEndLoc() const { return getSemiLoc(); }
static bool classof(const Stmt *T) {
return T->getStmtClass() == NullStmtClass;
}
child_range children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
};
/// CompoundStmt - This represents a group of statements like { stmt stmt }.
class CompoundStmt final : public Stmt,
private llvm::TrailingObjects<CompoundStmt, Stmt *> {
friend class ASTStmtReader;
friend TrailingObjects;
/// The location of the closing "}". LBraceLoc is stored in CompoundStmtBits.
SourceLocation RBraceLoc;
CompoundStmt(ArrayRef<Stmt *> Stmts, SourceLocation LB, SourceLocation RB);
explicit CompoundStmt(EmptyShell Empty) : Stmt(CompoundStmtClass, Empty) {}
void setStmts(ArrayRef<Stmt *> Stmts);
public:
static CompoundStmt *Create(const ASTContext &C, ArrayRef<Stmt *> Stmts,
SourceLocation LB, SourceLocation RB);
// Build an empty compound statement with a location.
explicit CompoundStmt(SourceLocation Loc)
: Stmt(CompoundStmtClass), RBraceLoc(Loc) {
CompoundStmtBits.NumStmts = 0;
CompoundStmtBits.LBraceLoc = Loc;
}
// Build an empty compound statement.
static CompoundStmt *CreateEmpty(const ASTContext &C, unsigned NumStmts);
bool body_empty() const { return CompoundStmtBits.NumStmts == 0; }
unsigned size() const { return CompoundStmtBits.NumStmts; }
using body_iterator = Stmt **;
using body_range = llvm::iterator_range<body_iterator>;
body_range body() { return body_range(body_begin(), body_end()); }
body_iterator body_begin() { return getTrailingObjects<Stmt *>(); }
body_iterator body_end() { return body_begin() + size(); }
Stmt *body_front() { return !body_empty() ? body_begin()[0] : nullptr; }
Stmt *body_back() {
return !body_empty() ? body_begin()[size() - 1] : nullptr;
}
using const_body_iterator = Stmt *const *;
using body_const_range = llvm::iterator_range<const_body_iterator>;
body_const_range body() const {
return body_const_range(body_begin(), body_end());
}
const_body_iterator body_begin() const {
return getTrailingObjects<Stmt *>();
}
const_body_iterator body_end() const { return body_begin() + size(); }
const Stmt *body_front() const {
return !body_empty() ? body_begin()[0] : nullptr;
}
const Stmt *body_back() const {
return !body_empty() ? body_begin()[size() - 1] : nullptr;
}
using reverse_body_iterator = std::reverse_iterator<body_iterator>;
reverse_body_iterator body_rbegin() {
return reverse_body_iterator(body_end());
}
reverse_body_iterator body_rend() {
return reverse_body_iterator(body_begin());
}
using const_reverse_body_iterator =
std::reverse_iterator<const_body_iterator>;
const_reverse_body_iterator body_rbegin() const {
return const_reverse_body_iterator(body_end());
}
const_reverse_body_iterator body_rend() const {
return const_reverse_body_iterator(body_begin());
}
// Get the Stmt that StmtExpr would consider to be the result of this
// compound statement. This is used by StmtExpr to properly emulate the GCC
// compound expression extension, which ignores trailing NullStmts when
// getting the result of the expression.
// i.e. ({ 5;;; })
// ^^ ignored
// If we don't find something that isn't a NullStmt, just return the last
// Stmt.
Stmt *getStmtExprResult() {
for (auto *B : llvm::reverse(body())) {
if (!isa<NullStmt>(B))
return B;
}
return body_back();
}
const Stmt *getStmtExprResult() const {
return const_cast<CompoundStmt *>(this)->getStmtExprResult();
}
SourceLocation getBeginLoc() const { return CompoundStmtBits.LBraceLoc; }
SourceLocation getEndLoc() const { return RBraceLoc; }
SourceLocation getLBracLoc() const { return CompoundStmtBits.LBraceLoc; }
SourceLocation getRBracLoc() const { return RBraceLoc; }
static bool classof(const Stmt *T) {
return T->getStmtClass() == CompoundStmtClass;
}
// Iterators
child_range children() { return child_range(body_begin(), body_end()); }
const_child_range children() const {
return const_child_range(body_begin(), body_end());
}
};
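// Usage sketch (illustrative; CS is an assumed "const CompoundStmt *"):
//
//   for (const Stmt *Child : CS->body())
//     Child->dump();
//
// Note that getStmtExprResult() above skips trailing NullStmts, so for
// the GNU statement expression ({ 5;;; }) it returns the statement
// for '5'.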
// SwitchCase is the base class for CaseStmt and DefaultStmt.
class SwitchCase : public Stmt {
protected:
/// The location of the ":".
SourceLocation ColonLoc;
// The location of the "case" or "default" keyword. Stored in SwitchCaseBits.
// SourceLocation KeywordLoc;
/// A pointer to the following CaseStmt or DefaultStmt class,
/// used by SwitchStmt.
SwitchCase *NextSwitchCase = nullptr;
SwitchCase(StmtClass SC, SourceLocation KWLoc, SourceLocation ColonLoc)
: Stmt(SC), ColonLoc(ColonLoc) {
setKeywordLoc(KWLoc);
}
SwitchCase(StmtClass SC, EmptyShell) : Stmt(SC) {}
public:
const SwitchCase *getNextSwitchCase() const { return NextSwitchCase; }
SwitchCase *getNextSwitchCase() { return NextSwitchCase; }
void setNextSwitchCase(SwitchCase *SC) { NextSwitchCase = SC; }
SourceLocation getKeywordLoc() const { return SwitchCaseBits.KeywordLoc; }
void setKeywordLoc(SourceLocation L) { SwitchCaseBits.KeywordLoc = L; }
SourceLocation getColonLoc() const { return ColonLoc; }
void setColonLoc(SourceLocation L) { ColonLoc = L; }
inline Stmt *getSubStmt();
const Stmt *getSubStmt() const {
return const_cast<SwitchCase *>(this)->getSubStmt();
}
SourceLocation getBeginLoc() const { return getKeywordLoc(); }
inline SourceLocation getEndLoc() const LLVM_READONLY;
static bool classof(const Stmt *T) {
return T->getStmtClass() == CaseStmtClass ||
T->getStmtClass() == DefaultStmtClass;
}
};
/// CaseStmt - Represent a case statement. It can optionally be a GNU case
/// statement of the form LHS ... RHS representing a range of cases.
class CaseStmt final
: public SwitchCase,
private llvm::TrailingObjects<CaseStmt, Stmt *, SourceLocation> {
friend TrailingObjects;
// CaseStmt is followed by several trailing objects, some of which are optional.
// Note that it would be more convenient to put the optional trailing objects
// at the end but this would impact children().
// The trailing objects are in order:
//
// * A "Stmt *" for the LHS of the case statement. Always present.
//
// * A "Stmt *" for the RHS of the case statement. This is a GNU extension
// which allows ranges in case statements of the form LHS ... RHS.
// Present if and only if caseStmtIsGNURange() is true.
//
// * A "Stmt *" for the substatement of the case statement. Always present.
//
// * A SourceLocation for the location of the ... if this is a case statement
// with a range. Present if and only if caseStmtIsGNURange() is true.
enum { LhsOffset = 0, SubStmtOffsetFromRhs = 1 };
enum { NumMandatoryStmtPtr = 2 };
unsigned numTrailingObjects(OverloadToken<Stmt *>) const {
return NumMandatoryStmtPtr + caseStmtIsGNURange();
}
unsigned numTrailingObjects(OverloadToken<SourceLocation>) const {
return caseStmtIsGNURange();
}
unsigned lhsOffset() const { return LhsOffset; }
unsigned rhsOffset() const { return LhsOffset + caseStmtIsGNURange(); }
unsigned subStmtOffset() const { return rhsOffset() + SubStmtOffsetFromRhs; }
/// Build a case statement assuming that the storage for the
/// trailing objects has been properly allocated.
CaseStmt(Expr *lhs, Expr *rhs, SourceLocation caseLoc,
SourceLocation ellipsisLoc, SourceLocation colonLoc)
: SwitchCase(CaseStmtClass, caseLoc, colonLoc) {
// Handle GNU case statements of the form LHS ... RHS.
bool IsGNURange = rhs != nullptr;
SwitchCaseBits.CaseStmtIsGNURange = IsGNURange;
setLHS(lhs);
setSubStmt(nullptr);
if (IsGNURange) {
setRHS(rhs);
setEllipsisLoc(ellipsisLoc);
}
}
/// Build an empty switch case statement.
explicit CaseStmt(EmptyShell Empty, bool CaseStmtIsGNURange)
: SwitchCase(CaseStmtClass, Empty) {
SwitchCaseBits.CaseStmtIsGNURange = CaseStmtIsGNURange;
}
public:
/// Build a case statement.
static CaseStmt *Create(const ASTContext &Ctx, Expr *lhs, Expr *rhs,
SourceLocation caseLoc, SourceLocation ellipsisLoc,
SourceLocation colonLoc);
/// Build an empty case statement.
static CaseStmt *CreateEmpty(const ASTContext &Ctx, bool CaseStmtIsGNURange);
/// True if this case statement is of the form case LHS ... RHS, which
/// is a GNU extension. In this case the RHS can be obtained with getRHS()
/// and the location of the ellipsis can be obtained with getEllipsisLoc().
bool caseStmtIsGNURange() const { return SwitchCaseBits.CaseStmtIsGNURange; }
SourceLocation getCaseLoc() const { return getKeywordLoc(); }
void setCaseLoc(SourceLocation L) { setKeywordLoc(L); }
/// Get the location of the ... in a case statement of the form LHS ... RHS.
SourceLocation getEllipsisLoc() const {
return caseStmtIsGNURange() ? *getTrailingObjects<SourceLocation>()
: SourceLocation();
}
/// Set the location of the ... in a case statement of the form LHS ... RHS.
/// Assert that this case statement is of this form.
void setEllipsisLoc(SourceLocation L) {
assert(
caseStmtIsGNURange() &&
"setEllipsisLoc but this is not a case stmt of the form LHS ... RHS!");
*getTrailingObjects<SourceLocation>() = L;
}
Expr *getLHS() {
return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[lhsOffset()]);
}
const Expr *getLHS() const {
return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[lhsOffset()]);
}
void setLHS(Expr *Val) {
getTrailingObjects<Stmt *>()[lhsOffset()] = reinterpret_cast<Stmt *>(Val);
}
Expr *getRHS() {
return caseStmtIsGNURange() ? reinterpret_cast<Expr *>(
getTrailingObjects<Stmt *>()[rhsOffset()])
: nullptr;
}
const Expr *getRHS() const {
return caseStmtIsGNURange() ? reinterpret_cast<Expr *>(
getTrailingObjects<Stmt *>()[rhsOffset()])
: nullptr;
}
void setRHS(Expr *Val) {
assert(caseStmtIsGNURange() &&
"setRHS but this is not a case stmt of the form LHS ... RHS!");
getTrailingObjects<Stmt *>()[rhsOffset()] = reinterpret_cast<Stmt *>(Val);
}
Stmt *getSubStmt() { return getTrailingObjects<Stmt *>()[subStmtOffset()]; }
const Stmt *getSubStmt() const {
return getTrailingObjects<Stmt *>()[subStmtOffset()];
}
void setSubStmt(Stmt *S) {
getTrailingObjects<Stmt *>()[subStmtOffset()] = S;
}
SourceLocation getBeginLoc() const { return getKeywordLoc(); }
SourceLocation getEndLoc() const LLVM_READONLY {
// Handle deeply nested case statements with iteration instead of recursion.
const CaseStmt *CS = this;
while (const auto *CS2 = dyn_cast<CaseStmt>(CS->getSubStmt()))
CS = CS2;
return CS->getSubStmt()->getEndLoc();
}
static bool classof(const Stmt *T) {
return T->getStmtClass() == CaseStmtClass;
}
// Iterators
child_range children() {
return child_range(getTrailingObjects<Stmt *>(),
getTrailingObjects<Stmt *>() +
numTrailingObjects(OverloadToken<Stmt *>()));
}
const_child_range children() const {
return const_child_range(getTrailingObjects<Stmt *>(),
getTrailingObjects<Stmt *>() +
numTrailingObjects(OverloadToken<Stmt *>()));
}
};
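/// DefaultStmt - Represents the 'default' label of a switch statement. For
/// example:
/// \code
///   default: break;
/// \endcode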
class DefaultStmt : public SwitchCase {
Stmt *SubStmt;
public:
DefaultStmt(SourceLocation DL, SourceLocation CL, Stmt *substmt)
: SwitchCase(DefaultStmtClass, DL, CL), SubStmt(substmt) {}
/// Build an empty default statement.
explicit DefaultStmt(EmptyShell Empty)
: SwitchCase(DefaultStmtClass, Empty) {}
Stmt *getSubStmt() { return SubStmt; }
const Stmt *getSubStmt() const { return SubStmt; }
void setSubStmt(Stmt *S) { SubStmt = S; }
SourceLocation getDefaultLoc() const { return getKeywordLoc(); }
void setDefaultLoc(SourceLocation L) { setKeywordLoc(L); }
SourceLocation getBeginLoc() const { return getKeywordLoc(); }
SourceLocation getEndLoc() const LLVM_READONLY {
return SubStmt->getEndLoc();
}
static bool classof(const Stmt *T) {
return T->getStmtClass() == DefaultStmtClass;
}
// Iterators
child_range children() { return child_range(&SubStmt, &SubStmt + 1); }
const_child_range children() const {
return const_child_range(&SubStmt, &SubStmt + 1);
}
};
SourceLocation SwitchCase::getEndLoc() const {
if (const auto *CS = dyn_cast<CaseStmt>(this))
return CS->getEndLoc();
else if (const auto *DS = dyn_cast<DefaultStmt>(this))
return DS->getEndLoc();
llvm_unreachable("SwitchCase is neither a CaseStmt nor a DefaultStmt!");
}
Stmt *SwitchCase::getSubStmt() {
if (auto *CS = dyn_cast<CaseStmt>(this))
return CS->getSubStmt();
else if (auto *DS = dyn_cast<DefaultStmt>(this))
return DS->getSubStmt();
llvm_unreachable("SwitchCase is neither a CaseStmt nor a DefaultStmt!");
}
/// Represents a statement that could possibly have a value and type. This
/// covers expression-statements, as well as labels and attributed statements.
///
/// Value statements have a special meaning when they are the last non-null
/// statement in a GNU statement expression, where they determine the value
/// of the statement expression.
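/// For example, in the following GNU statement expression (an illustrative
/// snippet; foo() is a placeholder), the expression statement '42;' is the
/// value statement that gives the whole expression its value:
/// \code
///   int x = ({ foo(); 42; });
/// \endcode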
class ValueStmt : public Stmt {
protected:
using Stmt::Stmt;
public:
const Expr *getExprStmt() const;
Expr *getExprStmt() {
const ValueStmt *ConstThis = this;
return const_cast<Expr*>(ConstThis->getExprStmt());
}
static bool classof(const Stmt *T) {
return T->getStmtClass() >= firstValueStmtConstant &&
T->getStmtClass() <= lastValueStmtConstant;
}
};
/// LabelStmt - Represents a label, which has a substatement. For example:
/// foo: return;
class LabelStmt : public ValueStmt {
LabelDecl *TheDecl;
Stmt *SubStmt;
public:
/// Build a label statement.
LabelStmt(SourceLocation IL, LabelDecl *D, Stmt *substmt)
: ValueStmt(LabelStmtClass), TheDecl(D), SubStmt(substmt) {
setIdentLoc(IL);
}
/// Build an empty label statement.
explicit LabelStmt(EmptyShell Empty) : ValueStmt(LabelStmtClass, Empty) {}
SourceLocation getIdentLoc() const { return LabelStmtBits.IdentLoc; }
void setIdentLoc(SourceLocation L) { LabelStmtBits.IdentLoc = L; }
LabelDecl *getDecl() const { return TheDecl; }
void setDecl(LabelDecl *D) { TheDecl = D; }
const char *getName() const;
Stmt *getSubStmt() { return SubStmt; }
const Stmt *getSubStmt() const { return SubStmt; }
void setSubStmt(Stmt *SS) { SubStmt = SS; }
SourceLocation getBeginLoc() const { return getIdentLoc(); }
SourceLocation getEndLoc() const LLVM_READONLY { return SubStmt->getEndLoc();}
child_range children() { return child_range(&SubStmt, &SubStmt + 1); }
const_child_range children() const {
return const_child_range(&SubStmt, &SubStmt + 1);
}
static bool classof(const Stmt *T) {
return T->getStmtClass() == LabelStmtClass;
}
};
/// Represents an attribute applied to a statement. For example:
/// [[omp::for(...)]] for (...) { ... }
class AttributedStmt final
: public ValueStmt,
private llvm::TrailingObjects<AttributedStmt, const Attr *> {
friend class ASTStmtReader;
friend TrailingObjects;
Stmt *SubStmt;
AttributedStmt(SourceLocation Loc, ArrayRef<const Attr *> Attrs,
Stmt *SubStmt)
: ValueStmt(AttributedStmtClass), SubStmt(SubStmt) {
AttributedStmtBits.NumAttrs = Attrs.size();
AttributedStmtBits.AttrLoc = Loc;
std::copy(Attrs.begin(), Attrs.end(), getAttrArrayPtr());
}
explicit AttributedStmt(EmptyShell Empty, unsigned NumAttrs)
: ValueStmt(AttributedStmtClass, Empty) {
AttributedStmtBits.NumAttrs = NumAttrs;
AttributedStmtBits.AttrLoc = SourceLocation{};
std::fill_n(getAttrArrayPtr(), NumAttrs, nullptr);
}
const Attr *const *getAttrArrayPtr() const {
return getTrailingObjects<const Attr *>();
}
const Attr **getAttrArrayPtr() { return getTrailingObjects<const Attr *>(); }
public:
static AttributedStmt *Create(const ASTContext &C, SourceLocation Loc,
ArrayRef<const Attr *> Attrs, Stmt *SubStmt);
// Build an empty attributed statement.
static AttributedStmt *CreateEmpty(const ASTContext &C, unsigned NumAttrs);
SourceLocation getAttrLoc() const { return AttributedStmtBits.AttrLoc; }
ArrayRef<const Attr *> getAttrs() const {
return llvm::makeArrayRef(getAttrArrayPtr(), AttributedStmtBits.NumAttrs);
}
Stmt *getSubStmt() { return SubStmt; }
const Stmt *getSubStmt() const { return SubStmt; }
SourceLocation getBeginLoc() const { return getAttrLoc(); }
SourceLocation getEndLoc() const LLVM_READONLY { return SubStmt->getEndLoc();}
child_range children() { return child_range(&SubStmt, &SubStmt + 1); }
const_child_range children() const {
return const_child_range(&SubStmt, &SubStmt + 1);
}
static bool classof(const Stmt *T) {
return T->getStmtClass() == AttributedStmtClass;
}
};
/// IfStmt - This represents an if/then/else.
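/// For example (with placeholder functions), an if statement with a C++17
/// init-statement, a condition variable, and an else branch:
/// \code
///   if (int v = compute(); v > 0)
///     use(v);
///   else
///     fallback();
/// \endcode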
class IfStmt final
: public Stmt,
private llvm::TrailingObjects<IfStmt, Stmt *, SourceLocation> {
friend TrailingObjects;
// IfStmt is followed by several trailing objects, some of which are optional.
// Note that it would be more convenient to put the optional trailing
// objects at the end but this would change the order of the children.
// The trailing objects are in order:
//
// * A "Stmt *" for the init statement.
// Present if and only if hasInitStorage().
//
// * A "Stmt *" for the condition variable.
// Present if and only if hasVarStorage(). This is in fact a "DeclStmt *".
//
// * A "Stmt *" for the condition.
// Always present. This is in fact an "Expr *".
//
// * A "Stmt *" for the then statement.
// Always present.
//
// * A "Stmt *" for the else statement.
// Present if and only if hasElseStorage().
//
// * A "SourceLocation" for the location of the "else".
// Present if and only if hasElseStorage().
enum { InitOffset = 0, ThenOffsetFromCond = 1, ElseOffsetFromCond = 2 };
enum { NumMandatoryStmtPtr = 2 };
SourceLocation LParenLoc;
SourceLocation RParenLoc;
unsigned numTrailingObjects(OverloadToken<Stmt *>) const {
return NumMandatoryStmtPtr + hasElseStorage() + hasVarStorage() +
hasInitStorage();
}
unsigned numTrailingObjects(OverloadToken<SourceLocation>) const {
return hasElseStorage();
}
unsigned initOffset() const { return InitOffset; }
unsigned varOffset() const { return InitOffset + hasInitStorage(); }
unsigned condOffset() const {
return InitOffset + hasInitStorage() + hasVarStorage();
}
unsigned thenOffset() const { return condOffset() + ThenOffsetFromCond; }
unsigned elseOffset() const { return condOffset() + ElseOffsetFromCond; }
/// Build an if/then/else statement.
IfStmt(const ASTContext &Ctx, SourceLocation IL, bool IsConstexpr, Stmt *Init,
VarDecl *Var, Expr *Cond, SourceLocation LParenLoc,
SourceLocation RParenLoc, Stmt *Then, SourceLocation EL, Stmt *Else);
/// Build an empty if/then/else statement.
explicit IfStmt(EmptyShell Empty, bool HasElse, bool HasVar, bool HasInit);
public:
/// Create an IfStmt.
static IfStmt *Create(const ASTContext &Ctx, SourceLocation IL,
bool IsConstexpr, Stmt *Init, VarDecl *Var, Expr *Cond,
SourceLocation LPL, SourceLocation RPL, Stmt *Then,
SourceLocation EL = SourceLocation(),
Stmt *Else = nullptr);
/// Create an empty IfStmt optionally with storage for an else statement,
/// condition variable and init expression.
static IfStmt *CreateEmpty(const ASTContext &Ctx, bool HasElse, bool HasVar,
bool HasInit);
/// True if this IfStmt has the storage for an init statement.
bool hasInitStorage() const { return IfStmtBits.HasInit; }
/// True if this IfStmt has storage for a variable declaration.
bool hasVarStorage() const { return IfStmtBits.HasVar; }
/// True if this IfStmt has storage for an else statement.
bool hasElseStorage() const { return IfStmtBits.HasElse; }
Expr *getCond() {
return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
}
const Expr *getCond() const {
return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
}
void setCond(Expr *Cond) {
getTrailingObjects<Stmt *>()[condOffset()] = reinterpret_cast<Stmt *>(Cond);
}
Stmt *getThen() { return getTrailingObjects<Stmt *>()[thenOffset()]; }
const Stmt *getThen() const {
return getTrailingObjects<Stmt *>()[thenOffset()];
}
void setThen(Stmt *Then) {
getTrailingObjects<Stmt *>()[thenOffset()] = Then;
}
Stmt *getElse() {
return hasElseStorage() ? getTrailingObjects<Stmt *>()[elseOffset()]
: nullptr;
}
const Stmt *getElse() const {
return hasElseStorage() ? getTrailingObjects<Stmt *>()[elseOffset()]
: nullptr;
}
void setElse(Stmt *Else) {
assert(hasElseStorage() &&
"This if statement has no storage for an else statement!");
getTrailingObjects<Stmt *>()[elseOffset()] = Else;
}
/// Retrieve the variable declared in this "if" statement, if any.
///
/// In the following example, "x" is the condition variable.
/// \code
/// if (int x = foo()) {
/// printf("x is %d", x);
/// }
/// \endcode
VarDecl *getConditionVariable();
const VarDecl *getConditionVariable() const {
return const_cast<IfStmt *>(this)->getConditionVariable();
}
/// Set the condition variable for this if statement.
/// The if statement must have storage for the condition variable.
void setConditionVariable(const ASTContext &Ctx, VarDecl *V);
/// If this IfStmt has a condition variable, return the faux DeclStmt
/// associated with the creation of that condition variable.
DeclStmt *getConditionVariableDeclStmt() {
return hasVarStorage() ? static_cast<DeclStmt *>(
getTrailingObjects<Stmt *>()[varOffset()])
: nullptr;
}
const DeclStmt *getConditionVariableDeclStmt() const {
return hasVarStorage() ? static_cast<DeclStmt *>(
getTrailingObjects<Stmt *>()[varOffset()])
: nullptr;
}
Stmt *getInit() {
return hasInitStorage() ? getTrailingObjects<Stmt *>()[initOffset()]
: nullptr;
}
const Stmt *getInit() const {
return hasInitStorage() ? getTrailingObjects<Stmt *>()[initOffset()]
: nullptr;
}
void setInit(Stmt *Init) {
assert(hasInitStorage() &&
"This if statement has no storage for an init statement!");
getTrailingObjects<Stmt *>()[initOffset()] = Init;
}
SourceLocation getIfLoc() const { return IfStmtBits.IfLoc; }
void setIfLoc(SourceLocation IfLoc) { IfStmtBits.IfLoc = IfLoc; }
SourceLocation getElseLoc() const {
return hasElseStorage() ? *getTrailingObjects<SourceLocation>()
: SourceLocation();
}
void setElseLoc(SourceLocation ElseLoc) {
assert(hasElseStorage() &&
"This if statement has no storage for an else statement!");
*getTrailingObjects<SourceLocation>() = ElseLoc;
}
bool isConstexpr() const { return IfStmtBits.IsConstexpr; }
void setConstexpr(bool C) { IfStmtBits.IsConstexpr = C; }
/// If this is an 'if constexpr', determine which substatement will be taken.
/// Otherwise, or if the condition is value-dependent, returns None.
Optional<const Stmt*> getNondiscardedCase(const ASTContext &Ctx) const;
bool isObjCAvailabilityCheck() const;
SourceLocation getBeginLoc() const { return getIfLoc(); }
SourceLocation getEndLoc() const LLVM_READONLY {
if (getElse())
return getElse()->getEndLoc();
return getThen()->getEndLoc();
}
SourceLocation getLParenLoc() const { return LParenLoc; }
void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
SourceLocation getRParenLoc() const { return RParenLoc; }
void setRParenLoc(SourceLocation Loc) { RParenLoc = Loc; }
// Iterators over subexpressions. The iterators will include iterating
// over the initialization expression referenced by the condition variable.
child_range children() {
return child_range(getTrailingObjects<Stmt *>(),
getTrailingObjects<Stmt *>() +
numTrailingObjects(OverloadToken<Stmt *>()));
}
const_child_range children() const {
return const_child_range(getTrailingObjects<Stmt *>(),
getTrailingObjects<Stmt *>() +
numTrailingObjects(OverloadToken<Stmt *>()));
}
static bool classof(const Stmt *T) {
return T->getStmtClass() == IfStmtClass;
}
};
/// SwitchStmt - This represents a 'switch' stmt.
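/// For example (with placeholder functions), a switch with a C++17
/// init-statement and a condition:
/// \code
///   switch (int v = next(); v) {
///   case 0: break;
///   default: handle(v);
///   }
/// \endcode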
class SwitchStmt final : public Stmt,
private llvm::TrailingObjects<SwitchStmt, Stmt *> {
friend TrailingObjects;
/// Points to a linked list of case and default statements.
SwitchCase *FirstCase;
// SwitchStmt is followed by several trailing objects,
// some of which are optional. Note that it would be more convenient to
// put the optional trailing objects at the end but this would change
// the order in children().
// The trailing objects are in order:
//
// * A "Stmt *" for the init statement.
// Present if and only if hasInitStorage().
//
// * A "Stmt *" for the condition variable.
// Present if and only if hasVarStorage(). This is in fact a "DeclStmt *".
//
// * A "Stmt *" for the condition.
// Always present. This is in fact an "Expr *".
//
// * A "Stmt *" for the body.
// Always present.
enum { InitOffset = 0, BodyOffsetFromCond = 1 };
enum { NumMandatoryStmtPtr = 2 };
SourceLocation LParenLoc;
SourceLocation RParenLoc;
unsigned numTrailingObjects(OverloadToken<Stmt *>) const {
return NumMandatoryStmtPtr + hasInitStorage() + hasVarStorage();
}
unsigned initOffset() const { return InitOffset; }
unsigned varOffset() const { return InitOffset + hasInitStorage(); }
unsigned condOffset() const {
return InitOffset + hasInitStorage() + hasVarStorage();
}
unsigned bodyOffset() const { return condOffset() + BodyOffsetFromCond; }
/// Build a switch statement.
SwitchStmt(const ASTContext &Ctx, Stmt *Init, VarDecl *Var, Expr *Cond,
SourceLocation LParenLoc, SourceLocation RParenLoc);
/// Build an empty switch statement.
explicit SwitchStmt(EmptyShell Empty, bool HasInit, bool HasVar);
public:
/// Create a switch statement.
static SwitchStmt *Create(const ASTContext &Ctx, Stmt *Init, VarDecl *Var,
Expr *Cond, SourceLocation LParenLoc,
SourceLocation RParenLoc);
/// Create an empty switch statement optionally with storage for
/// an init expression and a condition variable.
static SwitchStmt *CreateEmpty(const ASTContext &Ctx, bool HasInit,
bool HasVar);
/// True if this SwitchStmt has storage for an init statement.
bool hasInitStorage() const { return SwitchStmtBits.HasInit; }
/// True if this SwitchStmt has storage for a condition variable.
bool hasVarStorage() const { return SwitchStmtBits.HasVar; }
Expr *getCond() {
return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
}
const Expr *getCond() const {
return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
}
void setCond(Expr *Cond) {
getTrailingObjects<Stmt *>()[condOffset()] = reinterpret_cast<Stmt *>(Cond);
}
Stmt *getBody() { return getTrailingObjects<Stmt *>()[bodyOffset()]; }
const Stmt *getBody() const {
return getTrailingObjects<Stmt *>()[bodyOffset()];
}
void setBody(Stmt *Body) {
getTrailingObjects<Stmt *>()[bodyOffset()] = Body;
}
Stmt *getInit() {
return hasInitStorage() ? getTrailingObjects<Stmt *>()[initOffset()]
: nullptr;
}
const Stmt *getInit() const {
return hasInitStorage() ? getTrailingObjects<Stmt *>()[initOffset()]
: nullptr;
}
void setInit(Stmt *Init) {
assert(hasInitStorage() &&
"This switch statement has no storage for an init statement!");
getTrailingObjects<Stmt *>()[initOffset()] = Init;
}
/// Retrieve the variable declared in this "switch" statement, if any.
///
/// In the following example, "x" is the condition variable.
/// \code
/// switch (int x = foo()) {
/// case 0: break;
/// // ...
/// }
/// \endcode
VarDecl *getConditionVariable();
const VarDecl *getConditionVariable() const {
return const_cast<SwitchStmt *>(this)->getConditionVariable();
}
/// Set the condition variable in this switch statement.
/// The switch statement must have storage for it.
void setConditionVariable(const ASTContext &Ctx, VarDecl *VD);
/// If this SwitchStmt has a condition variable, return the faux DeclStmt
/// associated with the creation of that condition variable.
DeclStmt *getConditionVariableDeclStmt() {
return hasVarStorage() ? static_cast<DeclStmt *>(
getTrailingObjects<Stmt *>()[varOffset()])
: nullptr;
}
const DeclStmt *getConditionVariableDeclStmt() const {
return hasVarStorage() ? static_cast<DeclStmt *>(
getTrailingObjects<Stmt *>()[varOffset()])
: nullptr;
}
SwitchCase *getSwitchCaseList() { return FirstCase; }
const SwitchCase *getSwitchCaseList() const { return FirstCase; }
void setSwitchCaseList(SwitchCase *SC) { FirstCase = SC; }
SourceLocation getSwitchLoc() const { return SwitchStmtBits.SwitchLoc; }
void setSwitchLoc(SourceLocation L) { SwitchStmtBits.SwitchLoc = L; }
SourceLocation getLParenLoc() const { return LParenLoc; }
void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
SourceLocation getRParenLoc() const { return RParenLoc; }
void setRParenLoc(SourceLocation Loc) { RParenLoc = Loc; }
void setBody(Stmt *S, SourceLocation SL) {
setBody(S);
setSwitchLoc(SL);
}
void addSwitchCase(SwitchCase *SC) {
assert(!SC->getNextSwitchCase() &&
"case/default already added to a switch");
SC->setNextSwitchCase(FirstCase);
FirstCase = SC;
}
/// Set a flag in the SwitchStmt indicating that if the 'switch (X)' is a
/// switch over an enum value then all cases have been explicitly covered.
void setAllEnumCasesCovered() { SwitchStmtBits.AllEnumCasesCovered = true; }
/// Returns true if the SwitchStmt is a switch of an enum value and all cases
/// have been explicitly covered.
bool isAllEnumCasesCovered() const {
return SwitchStmtBits.AllEnumCasesCovered;
}
SourceLocation getBeginLoc() const { return getSwitchLoc(); }
SourceLocation getEndLoc() const LLVM_READONLY {
return getBody() ? getBody()->getEndLoc()
: reinterpret_cast<const Stmt *>(getCond())->getEndLoc();
}
// Iterators
child_range children() {
return child_range(getTrailingObjects<Stmt *>(),
getTrailingObjects<Stmt *>() +
numTrailingObjects(OverloadToken<Stmt *>()));
}
const_child_range children() const {
return const_child_range(getTrailingObjects<Stmt *>(),
getTrailingObjects<Stmt *>() +
numTrailingObjects(OverloadToken<Stmt *>()));
}
static bool classof(const Stmt *T) {
return T->getStmtClass() == SwitchStmtClass;
}
};
/// WhileStmt - This represents a 'while' stmt.
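/// For example:
/// \code
///   while (node != nullptr)
///     node = node->next;
/// \endcode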
class WhileStmt final : public Stmt,
private llvm::TrailingObjects<WhileStmt, Stmt *> {
friend TrailingObjects;
// WhileStmt is followed by several trailing objects,
// some of which are optional. Note that it would be more
// convenient to put the optional trailing object at the end
// but this would affect children().
// The trailing objects are in order:
//
// * A "Stmt *" for the condition variable.
// Present if and only if hasVarStorage(). This is in fact a "DeclStmt *".
//
// * A "Stmt *" for the condition.
// Always present. This is in fact an "Expr *".
//
// * A "Stmt *" for the body.
// Always present.
//
enum { VarOffset = 0, BodyOffsetFromCond = 1 };
enum { NumMandatoryStmtPtr = 2 };
SourceLocation LParenLoc, RParenLoc;
unsigned varOffset() const { return VarOffset; }
unsigned condOffset() const { return VarOffset + hasVarStorage(); }
unsigned bodyOffset() const { return condOffset() + BodyOffsetFromCond; }
unsigned numTrailingObjects(OverloadToken<Stmt *>) const {
return NumMandatoryStmtPtr + hasVarStorage();
}
/// Build a while statement.
WhileStmt(const ASTContext &Ctx, VarDecl *Var, Expr *Cond, Stmt *Body,
SourceLocation WL, SourceLocation LParenLoc,
SourceLocation RParenLoc);
/// Build an empty while statement.
explicit WhileStmt(EmptyShell Empty, bool HasVar);
public:
/// Create a while statement.
static WhileStmt *Create(const ASTContext &Ctx, VarDecl *Var, Expr *Cond,
Stmt *Body, SourceLocation WL,
SourceLocation LParenLoc, SourceLocation RParenLoc);
/// Create an empty while statement optionally with storage for
/// a condition variable.
static WhileStmt *CreateEmpty(const ASTContext &Ctx, bool HasVar);
/// True if this WhileStmt has storage for a condition variable.
bool hasVarStorage() const { return WhileStmtBits.HasVar; }
Expr *getCond() {
return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
}
const Expr *getCond() const {
return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
}
void setCond(Expr *Cond) {
getTrailingObjects<Stmt *>()[condOffset()] = reinterpret_cast<Stmt *>(Cond);
}
Stmt *getBody() { return getTrailingObjects<Stmt *>()[bodyOffset()]; }
const Stmt *getBody() const {
return getTrailingObjects<Stmt *>()[bodyOffset()];
}
void setBody(Stmt *Body) {
getTrailingObjects<Stmt *>()[bodyOffset()] = Body;
}
/// Retrieve the variable declared in this "while" statement, if any.
///
/// In the following example, "x" is the condition variable.
/// \code
/// while (int x = random()) {
/// // ...
/// }
/// \endcode
VarDecl *getConditionVariable();
const VarDecl *getConditionVariable() const {
return const_cast<WhileStmt *>(this)->getConditionVariable();
}
/// Set the condition variable of this while statement.
/// The while statement must have storage for it.
void setConditionVariable(const ASTContext &Ctx, VarDecl *V);
/// If this WhileStmt has a condition variable, return the faux DeclStmt
/// associated with the creation of that condition variable.
DeclStmt *getConditionVariableDeclStmt() {
return hasVarStorage() ? static_cast<DeclStmt *>(
getTrailingObjects<Stmt *>()[varOffset()])
: nullptr;
}
const DeclStmt *getConditionVariableDeclStmt() const {
return hasVarStorage() ? static_cast<DeclStmt *>(
getTrailingObjects<Stmt *>()[varOffset()])
: nullptr;
}
SourceLocation getWhileLoc() const { return WhileStmtBits.WhileLoc; }
void setWhileLoc(SourceLocation L) { WhileStmtBits.WhileLoc = L; }
SourceLocation getLParenLoc() const { return LParenLoc; }
void setLParenLoc(SourceLocation L) { LParenLoc = L; }
SourceLocation getRParenLoc() const { return RParenLoc; }
void setRParenLoc(SourceLocation L) { RParenLoc = L; }
SourceLocation getBeginLoc() const { return getWhileLoc(); }
SourceLocation getEndLoc() const LLVM_READONLY {
return getBody()->getEndLoc();
}
static bool classof(const Stmt *T) {
return T->getStmtClass() == WhileStmtClass;
}
// Iterators
child_range children() {
return child_range(getTrailingObjects<Stmt *>(),
getTrailingObjects<Stmt *>() +
numTrailingObjects(OverloadToken<Stmt *>()));
}
const_child_range children() const {
return const_child_range(getTrailingObjects<Stmt *>(),
getTrailingObjects<Stmt *>() +
numTrailingObjects(OverloadToken<Stmt *>()));
}
};
/// DoStmt - This represents a 'do/while' stmt.
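/// For example (with placeholder functions):
/// \code
///   do {
///     step();
///   } while (more());
/// \endcode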
class DoStmt : public Stmt {
enum { BODY, COND, END_EXPR };
Stmt *SubExprs[END_EXPR];
SourceLocation WhileLoc;
SourceLocation RParenLoc; // Location of final ')' in do stmt condition.
public:
DoStmt(Stmt *Body, Expr *Cond, SourceLocation DL, SourceLocation WL,
SourceLocation RP)
: Stmt(DoStmtClass), WhileLoc(WL), RParenLoc(RP) {
setCond(Cond);
setBody(Body);
setDoLoc(DL);
}
/// Build an empty do-while statement.
explicit DoStmt(EmptyShell Empty) : Stmt(DoStmtClass, Empty) {}
Expr *getCond() { return reinterpret_cast<Expr *>(SubExprs[COND]); }
const Expr *getCond() const {
return reinterpret_cast<Expr *>(SubExprs[COND]);
}
void setCond(Expr *Cond) { SubExprs[COND] = reinterpret_cast<Stmt *>(Cond); }
Stmt *getBody() { return SubExprs[BODY]; }
const Stmt *getBody() const { return SubExprs[BODY]; }
void setBody(Stmt *Body) { SubExprs[BODY] = Body; }
SourceLocation getDoLoc() const { return DoStmtBits.DoLoc; }
void setDoLoc(SourceLocation L) { DoStmtBits.DoLoc = L; }
SourceLocation getWhileLoc() const { return WhileLoc; }
void setWhileLoc(SourceLocation L) { WhileLoc = L; }
SourceLocation getRParenLoc() const { return RParenLoc; }
void setRParenLoc(SourceLocation L) { RParenLoc = L; }
SourceLocation getBeginLoc() const { return getDoLoc(); }
SourceLocation getEndLoc() const { return getRParenLoc(); }
static bool classof(const Stmt *T) {
return T->getStmtClass() == DoStmtClass;
}
// Iterators
child_range children() {
return child_range(&SubExprs[0], &SubExprs[0] + END_EXPR);
}
const_child_range children() const {
return const_child_range(&SubExprs[0], &SubExprs[0] + END_EXPR);
}
};
/// ForStmt - This represents a 'for (init;cond;inc)' stmt. Note that any of
/// the init/cond/inc parts of the ForStmt will be null if they were not
/// specified in the source.
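/// For example:
/// \code
///   for (;;) {}                      // init, cond and inc are all null
///   for (int i = 0; i != n; ++i) {} // all three parts are present
/// \endcode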
class ForStmt : public Stmt {
enum { INIT, CONDVAR, COND, INC, BODY, END_EXPR };
Stmt* SubExprs[END_EXPR]; // SubExprs[INIT] is an expression or declstmt.
SourceLocation LParenLoc, RParenLoc;
public:
ForStmt(const ASTContext &C, Stmt *Init, Expr *Cond, VarDecl *condVar,
Expr *Inc, Stmt *Body, SourceLocation FL, SourceLocation LP,
SourceLocation RP);
/// Build an empty for statement.
explicit ForStmt(EmptyShell Empty) : Stmt(ForStmtClass, Empty) {}
Stmt *getInit() { return SubExprs[INIT]; }
/// Retrieve the variable declared in this "for" statement, if any.
///
/// In the following example, "y" is the condition variable.
/// \code
/// for (int x = random(); int y = mangle(x); ++x) {
/// // ...
/// }
/// \endcode
VarDecl *getConditionVariable() const;
void setConditionVariable(const ASTContext &C, VarDecl *V);
/// If this ForStmt has a condition variable, return the faux DeclStmt
/// associated with the creation of that condition variable.
const DeclStmt *getConditionVariableDeclStmt() const {
return reinterpret_cast<DeclStmt*>(SubExprs[CONDVAR]);
}
Expr *getCond() { return reinterpret_cast<Expr*>(SubExprs[COND]); }
Expr *getInc() { return reinterpret_cast<Expr*>(SubExprs[INC]); }
Stmt *getBody() { return SubExprs[BODY]; }
const Stmt *getInit() const { return SubExprs[INIT]; }
const Expr *getCond() const { return reinterpret_cast<Expr*>(SubExprs[COND]);}
const Expr *getInc() const { return reinterpret_cast<Expr*>(SubExprs[INC]); }
const Stmt *getBody() const { return SubExprs[BODY]; }
void setInit(Stmt *S) { SubExprs[INIT] = S; }
void setCond(Expr *E) { SubExprs[COND] = reinterpret_cast<Stmt*>(E); }
void setInc(Expr *E) { SubExprs[INC] = reinterpret_cast<Stmt*>(E); }
void setBody(Stmt *S) { SubExprs[BODY] = S; }
SourceLocation getForLoc() const { return ForStmtBits.ForLoc; }
void setForLoc(SourceLocation L) { ForStmtBits.ForLoc = L; }
SourceLocation getLParenLoc() const { return LParenLoc; }
void setLParenLoc(SourceLocation L) { LParenLoc = L; }
SourceLocation getRParenLoc() const { return RParenLoc; }
void setRParenLoc(SourceLocation L) { RParenLoc = L; }
SourceLocation getBeginLoc() const { return getForLoc(); }
SourceLocation getEndLoc() const { return getBody()->getEndLoc(); }
static bool classof(const Stmt *T) {
return T->getStmtClass() == ForStmtClass;
}
// Iterators
child_range children() {
return child_range(&SubExprs[0], &SubExprs[0]+END_EXPR);
}
const_child_range children() const {
return const_child_range(&SubExprs[0], &SubExprs[0] + END_EXPR);
}
};
/// GotoStmt - This represents a direct goto.
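/// For example:
/// \code
///   goto done;
///   // ...
/// done:
///   return;
/// \endcode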
class GotoStmt : public Stmt {
LabelDecl *Label;
SourceLocation LabelLoc;
public:
GotoStmt(LabelDecl *label, SourceLocation GL, SourceLocation LL)
: Stmt(GotoStmtClass), Label(label), LabelLoc(LL) {
setGotoLoc(GL);
}
/// Build an empty goto statement.
explicit GotoStmt(EmptyShell Empty) : Stmt(GotoStmtClass, Empty) {}
LabelDecl *getLabel() const { return Label; }
void setLabel(LabelDecl *D) { Label = D; }
SourceLocation getGotoLoc() const { return GotoStmtBits.GotoLoc; }
void setGotoLoc(SourceLocation L) { GotoStmtBits.GotoLoc = L; }
SourceLocation getLabelLoc() const { return LabelLoc; }
void setLabelLoc(SourceLocation L) { LabelLoc = L; }
SourceLocation getBeginLoc() const { return getGotoLoc(); }
SourceLocation getEndLoc() const { return getLabelLoc(); }
static bool classof(const Stmt *T) {
return T->getStmtClass() == GotoStmtClass;
}
// Iterators
child_range children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
};
/// IndirectGotoStmt - This represents an indirect goto.
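/// For example, using the GNU labels-as-values extension:
/// \code
///   void *target = &&done; // address of a label
///   goto *target;          // indirect goto; 'target' is the target expr
/// done:;
/// \endcode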
class IndirectGotoStmt : public Stmt {
SourceLocation StarLoc;
Stmt *Target;
public:
IndirectGotoStmt(SourceLocation gotoLoc, SourceLocation starLoc, Expr *target)
: Stmt(IndirectGotoStmtClass), StarLoc(starLoc) {
setTarget(target);
setGotoLoc(gotoLoc);
}
/// Build an empty indirect goto statement.
explicit IndirectGotoStmt(EmptyShell Empty)
: Stmt(IndirectGotoStmtClass, Empty) {}
void setGotoLoc(SourceLocation L) { GotoStmtBits.GotoLoc = L; }
SourceLocation getGotoLoc() const { return GotoStmtBits.GotoLoc; }
void setStarLoc(SourceLocation L) { StarLoc = L; }
SourceLocation getStarLoc() const { return StarLoc; }
Expr *getTarget() { return reinterpret_cast<Expr *>(Target); }
const Expr *getTarget() const {
return reinterpret_cast<const Expr *>(Target);
}
void setTarget(Expr *E) { Target = reinterpret_cast<Stmt *>(E); }
/// getConstantTarget - Returns the fixed target of this indirect
/// goto, if one exists.
LabelDecl *getConstantTarget();
const LabelDecl *getConstantTarget() const {
return const_cast<IndirectGotoStmt *>(this)->getConstantTarget();
}
SourceLocation getBeginLoc() const { return getGotoLoc(); }
SourceLocation getEndLoc() const LLVM_READONLY { return Target->getEndLoc(); }
static bool classof(const Stmt *T) {
return T->getStmtClass() == IndirectGotoStmtClass;
}
// Iterators
child_range children() { return child_range(&Target, &Target + 1); }
const_child_range children() const {
return const_child_range(&Target, &Target + 1);
}
};
/// ContinueStmt - This represents a continue.
class ContinueStmt : public Stmt {
public:
ContinueStmt(SourceLocation CL) : Stmt(ContinueStmtClass) {
setContinueLoc(CL);
}
/// Build an empty continue statement.
explicit ContinueStmt(EmptyShell Empty) : Stmt(ContinueStmtClass, Empty) {}
SourceLocation getContinueLoc() const { return ContinueStmtBits.ContinueLoc; }
void setContinueLoc(SourceLocation L) { ContinueStmtBits.ContinueLoc = L; }
SourceLocation getBeginLoc() const { return getContinueLoc(); }
SourceLocation getEndLoc() const { return getContinueLoc(); }
static bool classof(const Stmt *T) {
return T->getStmtClass() == ContinueStmtClass;
}
// Iterators
child_range children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
};
/// BreakStmt - This represents a break.
class BreakStmt : public Stmt {
public:
BreakStmt(SourceLocation BL) : Stmt(BreakStmtClass) {
setBreakLoc(BL);
}
/// Build an empty break statement.
explicit BreakStmt(EmptyShell Empty) : Stmt(BreakStmtClass, Empty) {}
SourceLocation getBreakLoc() const { return BreakStmtBits.BreakLoc; }
void setBreakLoc(SourceLocation L) { BreakStmtBits.BreakLoc = L; }
SourceLocation getBeginLoc() const { return getBreakLoc(); }
SourceLocation getEndLoc() const { return getBreakLoc(); }
static bool classof(const Stmt *T) {
return T->getStmtClass() == BreakStmtClass;
}
// Iterators
child_range children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
};
/// ReturnStmt - This represents a return, optionally of an expression:
/// return;
/// return 4;
///
/// Note that GCC allows return with no argument in a function declared to
/// return a value, and it allows returning a value in functions declared to
/// return void. We explicitly model this in the AST, which means you can't
/// infer the presence or absence of a return argument from the function's
/// declared return type.
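/// For example, GCC accepts both of the following; the first is modeled with
/// a null return expression and the second with a non-null one:
/// \code
///   int f() { return; }    // no argument despite the non-void return type
///   void g() { return 4; } // argument despite the void return type
/// \endcode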
class ReturnStmt final
: public Stmt,
private llvm::TrailingObjects<ReturnStmt, const VarDecl *> {
friend TrailingObjects;
/// The return expression.
Stmt *RetExpr;
// ReturnStmt is followed optionally by a trailing "const VarDecl *"
// for the NRVO candidate. Present if and only if hasNRVOCandidate().
/// True if this ReturnStmt has storage for an NRVO candidate.
bool hasNRVOCandidate() const { return ReturnStmtBits.HasNRVOCandidate; }
unsigned numTrailingObjects(OverloadToken<const VarDecl *>) const {
return hasNRVOCandidate();
}
/// Build a return statement.
ReturnStmt(SourceLocation RL, Expr *E, const VarDecl *NRVOCandidate);
/// Build an empty return statement.
explicit ReturnStmt(EmptyShell Empty, bool HasNRVOCandidate);
public:
/// Create a return statement.
static ReturnStmt *Create(const ASTContext &Ctx, SourceLocation RL, Expr *E,
const VarDecl *NRVOCandidate);
/// Create an empty return statement, optionally with
/// storage for an NRVO candidate.
static ReturnStmt *CreateEmpty(const ASTContext &Ctx, bool HasNRVOCandidate);
Expr *getRetValue() { return reinterpret_cast<Expr *>(RetExpr); }
const Expr *getRetValue() const { return reinterpret_cast<Expr *>(RetExpr); }
void setRetValue(Expr *E) { RetExpr = reinterpret_cast<Stmt *>(E); }
/// Retrieve the variable that might be used for the named return
/// value optimization.
///
/// The optimization itself can only be performed if the variable is
/// also marked as an NRVO object.
const VarDecl *getNRVOCandidate() const {
return hasNRVOCandidate() ? *getTrailingObjects<const VarDecl *>()
: nullptr;
}
/// Set the variable that might be used for the named return value
/// optimization. The return statement must have storage for it,
/// which is the case if and only if hasNRVOCandidate() is true.
void setNRVOCandidate(const VarDecl *Var) {
assert(hasNRVOCandidate() &&
"This return statement has no storage for an NRVO candidate!");
*getTrailingObjects<const VarDecl *>() = Var;
}
SourceLocation getReturnLoc() const { return ReturnStmtBits.RetLoc; }
void setReturnLoc(SourceLocation L) { ReturnStmtBits.RetLoc = L; }
SourceLocation getBeginLoc() const { return getReturnLoc(); }
SourceLocation getEndLoc() const LLVM_READONLY {
return RetExpr ? RetExpr->getEndLoc() : getReturnLoc();
}
static bool classof(const Stmt *T) {
return T->getStmtClass() == ReturnStmtClass;
}
// Iterators
child_range children() {
if (RetExpr)
return child_range(&RetExpr, &RetExpr + 1);
return child_range(child_iterator(), child_iterator());
}
const_child_range children() const {
if (RetExpr)
return const_child_range(&RetExpr, &RetExpr + 1);
return const_child_range(const_child_iterator(), const_child_iterator());
}
};
/// AsmStmt is the base class for GCCAsmStmt and MSAsmStmt.
class AsmStmt : public Stmt {
protected:
friend class ASTStmtReader;
SourceLocation AsmLoc;
/// True if the assembly statement does not have any input or output
/// operands.
bool IsSimple;
/// If true, treat this inline assembly as having side effects.
/// This assembly statement should not be optimized, deleted or moved.
bool IsVolatile;
unsigned NumOutputs;
unsigned NumInputs;
unsigned NumClobbers;
Stmt **Exprs = nullptr;
AsmStmt(StmtClass SC, SourceLocation asmloc, bool issimple, bool isvolatile,
unsigned numoutputs, unsigned numinputs, unsigned numclobbers)
: Stmt (SC), AsmLoc(asmloc), IsSimple(issimple), IsVolatile(isvolatile),
NumOutputs(numoutputs), NumInputs(numinputs),
NumClobbers(numclobbers) {}
public:
/// Build an empty inline-assembly statement.
explicit AsmStmt(StmtClass SC, EmptyShell Empty) : Stmt(SC, Empty) {}
SourceLocation getAsmLoc() const { return AsmLoc; }
void setAsmLoc(SourceLocation L) { AsmLoc = L; }
bool isSimple() const { return IsSimple; }
void setSimple(bool V) { IsSimple = V; }
bool isVolatile() const { return IsVolatile; }
void setVolatile(bool V) { IsVolatile = V; }
SourceLocation getBeginLoc() const LLVM_READONLY { return {}; }
SourceLocation getEndLoc() const LLVM_READONLY { return {}; }
//===--- Asm String Analysis ---===//
/// Assemble final IR asm string.
std::string generateAsmString(const ASTContext &C) const;
//===--- Output operands ---===//
unsigned getNumOutputs() const { return NumOutputs; }
/// getOutputConstraint - Return the constraint string for the specified
/// output operand. All output constraints are known to be non-empty (either
/// '=' or '+').
StringRef getOutputConstraint(unsigned i) const;
/// isOutputPlusConstraint - Return true if the specified output constraint
/// is a "+" constraint (which is both an input and an output) or false if it
/// is an "=" constraint (just an output).
bool isOutputPlusConstraint(unsigned i) const {
return getOutputConstraint(i)[0] == '+';
}
const Expr *getOutputExpr(unsigned i) const;
/// getNumPlusOperands - Return the number of output operands that have a "+"
/// constraint.
unsigned getNumPlusOperands() const;
//===--- Input operands ---===//
unsigned getNumInputs() const { return NumInputs; }
/// getInputConstraint - Return the specified input constraint. Unlike output
/// constraints, these can be empty.
StringRef getInputConstraint(unsigned i) const;
const Expr *getInputExpr(unsigned i) const;
//===--- Other ---===//
unsigned getNumClobbers() const { return NumClobbers; }
StringRef getClobber(unsigned i) const;
static bool classof(const Stmt *T) {
return T->getStmtClass() == GCCAsmStmtClass ||
T->getStmtClass() == MSAsmStmtClass;
}
// Input expr iterators.
using inputs_iterator = ExprIterator;
using const_inputs_iterator = ConstExprIterator;
using inputs_range = llvm::iterator_range<inputs_iterator>;
using inputs_const_range = llvm::iterator_range<const_inputs_iterator>;
inputs_iterator begin_inputs() {
return &Exprs[0] + NumOutputs;
}
inputs_iterator end_inputs() {
return &Exprs[0] + NumOutputs + NumInputs;
}
inputs_range inputs() { return inputs_range(begin_inputs(), end_inputs()); }
const_inputs_iterator begin_inputs() const {
return &Exprs[0] + NumOutputs;
}
const_inputs_iterator end_inputs() const {
return &Exprs[0] + NumOutputs + NumInputs;
}
inputs_const_range inputs() const {
return inputs_const_range(begin_inputs(), end_inputs());
}
// Output expr iterators.
using outputs_iterator = ExprIterator;
using const_outputs_iterator = ConstExprIterator;
using outputs_range = llvm::iterator_range<outputs_iterator>;
using outputs_const_range = llvm::iterator_range<const_outputs_iterator>;
outputs_iterator begin_outputs() {
return &Exprs[0];
}
outputs_iterator end_outputs() {
return &Exprs[0] + NumOutputs;
}
outputs_range outputs() {
return outputs_range(begin_outputs(), end_outputs());
}
const_outputs_iterator begin_outputs() const {
return &Exprs[0];
}
const_outputs_iterator end_outputs() const {
return &Exprs[0] + NumOutputs;
}
outputs_const_range outputs() const {
return outputs_const_range(begin_outputs(), end_outputs());
}
child_range children() {
return child_range(&Exprs[0], &Exprs[0] + NumOutputs + NumInputs);
}
const_child_range children() const {
return const_child_range(&Exprs[0], &Exprs[0] + NumOutputs + NumInputs);
}
};
/// This represents a GCC inline-assembly statement extension.
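/// For example, a GCC extended asm statement with one output and one input
/// operand (x86 AT&T syntax):
/// \code
///   int src = 1, dst;
///   asm ("mov %1, %0\n\t"
///        "add $1, %0"
///        : "=r" (dst)
///        : "r" (src));
/// \endcode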
class GCCAsmStmt : public AsmStmt {
friend class ASTStmtReader;
SourceLocation RParenLoc;
StringLiteral *AsmStr;
// FIXME: If we wanted to, we could allocate all of these in one big array.
StringLiteral **Constraints = nullptr;
StringLiteral **Clobbers = nullptr;
IdentifierInfo **Names = nullptr;
unsigned NumLabels = 0;
public:
GCCAsmStmt(const ASTContext &C, SourceLocation asmloc, bool issimple,
bool isvolatile, unsigned numoutputs, unsigned numinputs,
IdentifierInfo **names, StringLiteral **constraints, Expr **exprs,
StringLiteral *asmstr, unsigned numclobbers,
StringLiteral **clobbers, unsigned numlabels,
SourceLocation rparenloc);
/// Build an empty inline-assembly statement.
explicit GCCAsmStmt(EmptyShell Empty) : AsmStmt(GCCAsmStmtClass, Empty) {}
SourceLocation getRParenLoc() const { return RParenLoc; }
void setRParenLoc(SourceLocation L) { RParenLoc = L; }
//===--- Asm String Analysis ---===//
const StringLiteral *getAsmString() const { return AsmStr; }
StringLiteral *getAsmString() { return AsmStr; }
void setAsmString(StringLiteral *E) { AsmStr = E; }
/// AsmStringPiece - this is part of a decomposed asm string specification
/// (for use with the AnalyzeAsmString function below). An asm string is
/// considered to be a concatenation of these parts.
class AsmStringPiece {
public:
enum Kind {
String, // String in .ll asm string form, "$" -> "$$" and "%%" -> "%".
Operand // Operand reference, with optional modifier %c4.
};
private:
Kind MyKind;
std::string Str;
unsigned OperandNo;
// Source range for operand references.
CharSourceRange Range;
public:
AsmStringPiece(const std::string &S) : MyKind(String), Str(S) {}
AsmStringPiece(unsigned OpNo, const std::string &S, SourceLocation Begin,
SourceLocation End)
: MyKind(Operand), Str(S), OperandNo(OpNo),
Range(CharSourceRange::getCharRange(Begin, End)) {}
bool isString() const { return MyKind == String; }
bool isOperand() const { return MyKind == Operand; }
const std::string &getString() const { return Str; }
unsigned getOperandNo() const {
assert(isOperand());
return OperandNo;
}
CharSourceRange getRange() const {
assert(isOperand() && "Range is currently used only for Operands.");
return Range;
}
/// getModifier - Get the modifier for this operand, if present. This
/// returns '\0' if there was no modifier.
char getModifier() const;
};
/// AnalyzeAsmString - Analyze the asm string of the current asm, decomposing
/// it into pieces. If the asm string is erroneous, emit errors and return a
/// nonzero diagnostic ID; otherwise return 0. This handles canonicalization
/// and translation of strings from GCC syntax to LLVM IR syntax, and handles
/// flattening of named references like %[foo] to Operand AsmStringPiece's.
unsigned AnalyzeAsmString(SmallVectorImpl<AsmStringPiece> &Pieces,
const ASTContext &C, unsigned &DiagOffs) const;
/// Assemble final IR asm string.
std::string generateAsmString(const ASTContext &C) const;
//===--- Output operands ---===//
IdentifierInfo *getOutputIdentifier(unsigned i) const { return Names[i]; }
StringRef getOutputName(unsigned i) const {
if (IdentifierInfo *II = getOutputIdentifier(i))
return II->getName();
return {};
}
StringRef getOutputConstraint(unsigned i) const;
const StringLiteral *getOutputConstraintLiteral(unsigned i) const {
return Constraints[i];
}
StringLiteral *getOutputConstraintLiteral(unsigned i) {
return Constraints[i];
}
Expr *getOutputExpr(unsigned i);
const Expr *getOutputExpr(unsigned i) const {
return const_cast<GCCAsmStmt*>(this)->getOutputExpr(i);
}
//===--- Input operands ---===//
IdentifierInfo *getInputIdentifier(unsigned i) const {
return Names[i + NumOutputs];
}
StringRef getInputName(unsigned i) const {
if (IdentifierInfo *II = getInputIdentifier(i))
return II->getName();
return {};
}
StringRef getInputConstraint(unsigned i) const;
const StringLiteral *getInputConstraintLiteral(unsigned i) const {
return Constraints[i + NumOutputs];
}
StringLiteral *getInputConstraintLiteral(unsigned i) {
return Constraints[i + NumOutputs];
}
Expr *getInputExpr(unsigned i);
void setInputExpr(unsigned i, Expr *E);
const Expr *getInputExpr(unsigned i) const {
return const_cast<GCCAsmStmt*>(this)->getInputExpr(i);
}
//===--- Labels ---===//
bool isAsmGoto() const {
return NumLabels > 0;
}
unsigned getNumLabels() const {
return NumLabels;
}
IdentifierInfo *getLabelIdentifier(unsigned i) const {
return Names[i + NumOutputs + NumInputs];
}
AddrLabelExpr *getLabelExpr(unsigned i) const;
StringRef getLabelName(unsigned i) const;
using labels_iterator = CastIterator<AddrLabelExpr>;
using const_labels_iterator = ConstCastIterator<AddrLabelExpr>;
using labels_range = llvm::iterator_range<labels_iterator>;
using labels_const_range = llvm::iterator_range<const_labels_iterator>;
labels_iterator begin_labels() {
return &Exprs[0] + NumOutputs + NumInputs;
}
labels_iterator end_labels() {
return &Exprs[0] + NumOutputs + NumInputs + NumLabels;
}
labels_range labels() {
return labels_range(begin_labels(), end_labels());
}
const_labels_iterator begin_labels() const {
return &Exprs[0] + NumOutputs + NumInputs;
}
const_labels_iterator end_labels() const {
return &Exprs[0] + NumOutputs + NumInputs + NumLabels;
}
labels_const_range labels() const {
return labels_const_range(begin_labels(), end_labels());
}
private:
void setOutputsAndInputsAndClobbers(const ASTContext &C,
IdentifierInfo **Names,
StringLiteral **Constraints,
Stmt **Exprs,
unsigned NumOutputs,
unsigned NumInputs,
unsigned NumLabels,
StringLiteral **Clobbers,
unsigned NumClobbers);
public:
//===--- Other ---===//
/// getNamedOperand - Given a symbolic operand reference like %[foo],
/// translate this into a numeric value needed to reference the same operand.
/// This returns -1 if the operand name is invalid.
int getNamedOperand(StringRef SymbolicName) const;
StringRef getClobber(unsigned i) const;
StringLiteral *getClobberStringLiteral(unsigned i) { return Clobbers[i]; }
const StringLiteral *getClobberStringLiteral(unsigned i) const {
return Clobbers[i];
}
SourceLocation getBeginLoc() const LLVM_READONLY { return AsmLoc; }
SourceLocation getEndLoc() const LLVM_READONLY { return RParenLoc; }
static bool classof(const Stmt *T) {
return T->getStmtClass() == GCCAsmStmtClass;
}
};
/// This represents a Microsoft inline-assembly statement extension.
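/// For example:
/// \code
///   __asm {
///     mov eax, 2
///   }
/// \endcode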
class MSAsmStmt : public AsmStmt {
friend class ASTStmtReader;
SourceLocation LBraceLoc, EndLoc;
StringRef AsmStr;
unsigned NumAsmToks = 0;
Token *AsmToks = nullptr;
StringRef *Constraints = nullptr;
StringRef *Clobbers = nullptr;
public:
MSAsmStmt(const ASTContext &C, SourceLocation asmloc,
SourceLocation lbraceloc, bool issimple, bool isvolatile,
ArrayRef<Token> asmtoks, unsigned numoutputs, unsigned numinputs,
ArrayRef<StringRef> constraints,
ArrayRef<Expr*> exprs, StringRef asmstr,
ArrayRef<StringRef> clobbers, SourceLocation endloc);
/// Build an empty MS-style inline-assembly statement.
explicit MSAsmStmt(EmptyShell Empty) : AsmStmt(MSAsmStmtClass, Empty) {}
SourceLocation getLBraceLoc() const { return LBraceLoc; }
void setLBraceLoc(SourceLocation L) { LBraceLoc = L; }
SourceLocation getEndLoc() const { return EndLoc; }
void setEndLoc(SourceLocation L) { EndLoc = L; }
bool hasBraces() const { return LBraceLoc.isValid(); }
unsigned getNumAsmToks() { return NumAsmToks; }
Token *getAsmToks() { return AsmToks; }
//===--- Asm String Analysis ---===//
StringRef getAsmString() const { return AsmStr; }
/// Assemble final IR asm string.
std::string generateAsmString(const ASTContext &C) const;
//===--- Output operands ---===//
StringRef getOutputConstraint(unsigned i) const {
assert(i < NumOutputs);
return Constraints[i];
}
Expr *getOutputExpr(unsigned i);
const Expr *getOutputExpr(unsigned i) const {
return const_cast<MSAsmStmt*>(this)->getOutputExpr(i);
}
//===--- Input operands ---===//
StringRef getInputConstraint(unsigned i) const {
assert(i < NumInputs);
return Constraints[i + NumOutputs];
}
Expr *getInputExpr(unsigned i);
void setInputExpr(unsigned i, Expr *E);
const Expr *getInputExpr(unsigned i) const {
return const_cast<MSAsmStmt*>(this)->getInputExpr(i);
}
//===--- Other ---===//
ArrayRef<StringRef> getAllConstraints() const {
return llvm::makeArrayRef(Constraints, NumInputs + NumOutputs);
}
ArrayRef<StringRef> getClobbers() const {
return llvm::makeArrayRef(Clobbers, NumClobbers);
}
ArrayRef<Expr*> getAllExprs() const {
return llvm::makeArrayRef(reinterpret_cast<Expr**>(Exprs),
NumInputs + NumOutputs);
}
StringRef getClobber(unsigned i) const { return getClobbers()[i]; }
private:
void initialize(const ASTContext &C, StringRef AsmString,
ArrayRef<Token> AsmToks, ArrayRef<StringRef> Constraints,
ArrayRef<Expr*> Exprs, ArrayRef<StringRef> Clobbers);
public:
SourceLocation getBeginLoc() const LLVM_READONLY { return AsmLoc; }
static bool classof(const Stmt *T) {
return T->getStmtClass() == MSAsmStmtClass;
}
child_range children() {
return child_range(&Exprs[0], &Exprs[NumInputs + NumOutputs]);
}
const_child_range children() const {
return const_child_range(&Exprs[0], &Exprs[NumInputs + NumOutputs]);
}
};
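/// Represents a '__except' handler of a Microsoft SEH '__try' statement;
/// see SEHTryStmt below.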
class SEHExceptStmt : public Stmt {
friend class ASTReader;
friend class ASTStmtReader;
SourceLocation Loc;
Stmt *Children[2];
enum { FILTER_EXPR, BLOCK };
SEHExceptStmt(SourceLocation Loc, Expr *FilterExpr, Stmt *Block);
explicit SEHExceptStmt(EmptyShell E) : Stmt(SEHExceptStmtClass, E) {}
public:
static SEHExceptStmt* Create(const ASTContext &C,
SourceLocation ExceptLoc,
Expr *FilterExpr,
Stmt *Block);
SourceLocation getBeginLoc() const LLVM_READONLY { return getExceptLoc(); }
SourceLocation getExceptLoc() const { return Loc; }
SourceLocation getEndLoc() const { return getBlock()->getEndLoc(); }
Expr *getFilterExpr() const {
return reinterpret_cast<Expr*>(Children[FILTER_EXPR]);
}
CompoundStmt *getBlock() const {
return cast<CompoundStmt>(Children[BLOCK]);
}
child_range children() {
return child_range(Children, Children+2);
}
const_child_range children() const {
return const_child_range(Children, Children + 2);
}
static bool classof(const Stmt *T) {
return T->getStmtClass() == SEHExceptStmtClass;
}
};
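/// Represents a '__finally' handler of a Microsoft SEH '__try' statement;
/// see SEHTryStmt below.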
class SEHFinallyStmt : public Stmt {
friend class ASTReader;
friend class ASTStmtReader;
SourceLocation Loc;
Stmt *Block;
SEHFinallyStmt(SourceLocation Loc, Stmt *Block);
explicit SEHFinallyStmt(EmptyShell E) : Stmt(SEHFinallyStmtClass, E) {}
public:
static SEHFinallyStmt* Create(const ASTContext &C,
SourceLocation FinallyLoc,
Stmt *Block);
SourceLocation getBeginLoc() const LLVM_READONLY { return getFinallyLoc(); }
SourceLocation getFinallyLoc() const { return Loc; }
SourceLocation getEndLoc() const { return Block->getEndLoc(); }
CompoundStmt *getBlock() const { return cast<CompoundStmt>(Block); }
child_range children() {
return child_range(&Block,&Block+1);
}
const_child_range children() const {
return const_child_range(&Block, &Block + 1);
}
static bool classof(const Stmt *T) {
return T->getStmtClass() == SEHFinallyStmtClass;
}
};
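/// Represents a Microsoft SEH '__try' statement together with its '__except'
/// or '__finally' handler. For example (the functions are placeholders):
/// \code
///   __try {
///     work();
///   } __except (EXCEPTION_EXECUTE_HANDLER) {
///     recover();
///   }
/// \endcode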
class SEHTryStmt : public Stmt {
friend class ASTReader;
friend class ASTStmtReader;
bool IsCXXTry;
SourceLocation TryLoc;
Stmt *Children[2];
enum { TRY = 0, HANDLER = 1 };
SEHTryStmt(bool isCXXTry, // true if 'try' otherwise '__try'
SourceLocation TryLoc,
Stmt *TryBlock,
Stmt *Handler);
explicit SEHTryStmt(EmptyShell E) : Stmt(SEHTryStmtClass, E) {}
public:
static SEHTryStmt* Create(const ASTContext &C, bool isCXXTry,
SourceLocation TryLoc, Stmt *TryBlock,
Stmt *Handler);
SourceLocation getBeginLoc() const LLVM_READONLY { return getTryLoc(); }
SourceLocation getTryLoc() const { return TryLoc; }
SourceLocation getEndLoc() const { return Children[HANDLER]->getEndLoc(); }
bool getIsCXXTry() const { return IsCXXTry; }
CompoundStmt* getTryBlock() const {
return cast<CompoundStmt>(Children[TRY]);
}
Stmt *getHandler() const { return Children[HANDLER]; }
/// Returns nullptr if the corresponding handler is not present.
SEHExceptStmt *getExceptHandler() const;
SEHFinallyStmt *getFinallyHandler() const;
child_range children() {
return child_range(Children, Children+2);
}
const_child_range children() const {
return const_child_range(Children, Children + 2);
}
static bool classof(const Stmt *T) {
return T->getStmtClass() == SEHTryStmtClass;
}
};
/// Represents a __leave statement.
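/// For example (with placeholder functions), '__leave' exits the enclosing
/// '__try' block early:
/// \code
///   __try {
///     if (!ok) __leave;
///     work();
///   } __finally {
///     cleanup();
///   }
/// \endcode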
class SEHLeaveStmt : public Stmt {
SourceLocation LeaveLoc;
public:
explicit SEHLeaveStmt(SourceLocation LL)
: Stmt(SEHLeaveStmtClass), LeaveLoc(LL) {}
/// Build an empty __leave statement.
explicit SEHLeaveStmt(EmptyShell Empty) : Stmt(SEHLeaveStmtClass, Empty) {}
SourceLocation getLeaveLoc() const { return LeaveLoc; }
void setLeaveLoc(SourceLocation L) { LeaveLoc = L; }
SourceLocation getBeginLoc() const LLVM_READONLY { return LeaveLoc; }
SourceLocation getEndLoc() const LLVM_READONLY { return LeaveLoc; }
static bool classof(const Stmt *T) {
return T->getStmtClass() == SEHLeaveStmtClass;
}
// Iterators
child_range children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
};
/// This captures a statement into a function. For example, the following
/// pragma annotated compound statement can be represented as a CapturedStmt,
/// and this compound statement is the body of an anonymous outlined function.
/// @code
/// #pragma omp parallel
/// {
/// compute();
/// }
/// @endcode
class CapturedStmt : public Stmt {
public:
/// The different capture forms: by 'this', by reference, by copy, or for a
/// variable-length array type.
enum VariableCaptureKind {
VCK_This,
VCK_ByRef,
VCK_ByCopy,
VCK_VLAType,
};
/// Describes the capture of a variable, of 'this', or of a
/// variable-length array type.
class Capture {
llvm::PointerIntPair<VarDecl *, 2, VariableCaptureKind> VarAndKind;
SourceLocation Loc;
public:
friend class ASTStmtReader;
/// Create a new capture.
///
/// \param Loc The source location associated with this capture.
///
/// \param Kind The kind of capture (this, ByRef, ...).
///
/// \param Var The variable being captured, or null if capturing this.
Capture(SourceLocation Loc, VariableCaptureKind Kind,
VarDecl *Var = nullptr);
/// Determine the kind of capture.
VariableCaptureKind getCaptureKind() const;
/// Retrieve the source location at which the variable or 'this' was
/// first used.
SourceLocation getLocation() const { return Loc; }
/// Determine whether this capture handles the C++ 'this' pointer.
bool capturesThis() const { return getCaptureKind() == VCK_This; }
/// Determine whether this capture handles a variable (by reference).
bool capturesVariable() const { return getCaptureKind() == VCK_ByRef; }
/// Determine whether this capture handles a variable by copy.
bool capturesVariableByCopy() const {
return getCaptureKind() == VCK_ByCopy;
}
/// Determine whether this capture handles a variable-length array
/// type.
bool capturesVariableArrayType() const {
return getCaptureKind() == VCK_VLAType;
}
/// Retrieve the declaration of the variable being captured.
///
/// This operation is only valid if this capture captures a variable.
VarDecl *getCapturedVar() const;
};
private:
/// The number of variables captured, including 'this'.
unsigned NumCaptures;
/// The pointer part is the implicit outlined function and the int part
/// is the captured region kind, 'CR_Default' etc.
llvm::PointerIntPair<CapturedDecl *, 2, CapturedRegionKind> CapDeclAndKind;
/// The record for captured variables, a RecordDecl or CXXRecordDecl.
RecordDecl *TheRecordDecl = nullptr;
/// Construct a captured statement.
CapturedStmt(Stmt *S, CapturedRegionKind Kind, ArrayRef<Capture> Captures,
ArrayRef<Expr *> CaptureInits, CapturedDecl *CD, RecordDecl *RD);
/// Construct an empty captured statement.
CapturedStmt(EmptyShell Empty, unsigned NumCaptures);
Stmt **getStoredStmts() { return reinterpret_cast<Stmt **>(this + 1); }
Stmt *const *getStoredStmts() const {
return reinterpret_cast<Stmt *const *>(this + 1);
}
Capture *getStoredCaptures() const;
void setCapturedStmt(Stmt *S) { getStoredStmts()[NumCaptures] = S; }
public:
friend class ASTStmtReader;
static CapturedStmt *Create(const ASTContext &Context, Stmt *S,
CapturedRegionKind Kind,
ArrayRef<Capture> Captures,
ArrayRef<Expr *> CaptureInits,
CapturedDecl *CD, RecordDecl *RD);
static CapturedStmt *CreateDeserialized(const ASTContext &Context,
unsigned NumCaptures);
/// Retrieve the statement being captured.
Stmt *getCapturedStmt() { return getStoredStmts()[NumCaptures]; }
const Stmt *getCapturedStmt() const { return getStoredStmts()[NumCaptures]; }
/// Retrieve the outlined function declaration.
CapturedDecl *getCapturedDecl();
const CapturedDecl *getCapturedDecl() const;
/// Set the outlined function declaration.
void setCapturedDecl(CapturedDecl *D);
/// Retrieve the captured region kind.
CapturedRegionKind getCapturedRegionKind() const;
/// Set the captured region kind.
void setCapturedRegionKind(CapturedRegionKind Kind);
/// Retrieve the record declaration for captured variables.
const RecordDecl *getCapturedRecordDecl() const { return TheRecordDecl; }
/// Set the record declaration for captured variables.
void setCapturedRecordDecl(RecordDecl *D) {
assert(D && "null RecordDecl");
TheRecordDecl = D;
}
/// True if this variable has been captured.
bool capturesVariable(const VarDecl *Var) const;
/// An iterator that walks over the captures.
using capture_iterator = Capture *;
using const_capture_iterator = const Capture *;
using capture_range = llvm::iterator_range<capture_iterator>;
using capture_const_range = llvm::iterator_range<const_capture_iterator>;
capture_range captures() {
return capture_range(capture_begin(), capture_end());
}
capture_const_range captures() const {
return capture_const_range(capture_begin(), capture_end());
}
/// Retrieve an iterator pointing to the first capture.
capture_iterator capture_begin() { return getStoredCaptures(); }
const_capture_iterator capture_begin() const { return getStoredCaptures(); }
/// Retrieve an iterator pointing past the end of the sequence of
/// captures.
capture_iterator capture_end() const {
return getStoredCaptures() + NumCaptures;
}
/// Retrieve the number of captures, including 'this'.
unsigned capture_size() const { return NumCaptures; }
/// Iterator that walks over the capture initialization arguments.
using capture_init_iterator = Expr **;
using capture_init_range = llvm::iterator_range<capture_init_iterator>;
/// Const iterator that walks over the capture initialization
/// arguments.
using const_capture_init_iterator = Expr *const *;
using const_capture_init_range =
llvm::iterator_range<const_capture_init_iterator>;
capture_init_range capture_inits() {
return capture_init_range(capture_init_begin(), capture_init_end());
}
const_capture_init_range capture_inits() const {
return const_capture_init_range(capture_init_begin(), capture_init_end());
}
/// Retrieve the first initialization argument.
capture_init_iterator capture_init_begin() {
return reinterpret_cast<Expr **>(getStoredStmts());
}
const_capture_init_iterator capture_init_begin() const {
return reinterpret_cast<Expr *const *>(getStoredStmts());
}
/// Retrieve the iterator pointing one past the last initialization
/// argument.
capture_init_iterator capture_init_end() {
return capture_init_begin() + NumCaptures;
}
const_capture_init_iterator capture_init_end() const {
return capture_init_begin() + NumCaptures;
}
SourceLocation getBeginLoc() const LLVM_READONLY {
return getCapturedStmt()->getBeginLoc();
}
SourceLocation getEndLoc() const LLVM_READONLY {
return getCapturedStmt()->getEndLoc();
}
SourceRange getSourceRange() const LLVM_READONLY {
return getCapturedStmt()->getSourceRange();
}
static bool classof(const Stmt *T) {
return T->getStmtClass() == CapturedStmtClass;
}
child_range children();
const_child_range children() const;
};
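// A minimal usage sketch (assuming an existing CapturedStmt *CS): captures()
// and capture_inits() have the same length, capture_size(), so they can be
// walked in lockstep to pair each capture with its initializer.
//
//   CapturedStmt::capture_init_iterator Init = CS->capture_init_begin();
//   for (const CapturedStmt::Capture &Cap : CS->captures()) {
//     if (Cap.capturesVariable() || Cap.capturesVariableByCopy())
//       (void)Cap.getCapturedVar(); // valid only for variable captures
//     ++Init;
//   }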
} // namespace clang
#endif // LLVM_CLANG_AST_STMT_H
|
par_csr_matop.c | /******************************************************************************
* Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
* HYPRE Project Developers. See the top-level COPYRIGHT file for details.
*
* SPDX-License-Identifier: (Apache-2.0 OR MIT)
******************************************************************************/
#include "_hypre_utilities.h"
#include "hypre_hopscotch_hash.h"
#include "_hypre_parcsr_mv.h"
#include "_hypre_lapack.h"
#include "_hypre_blas.h"
/*--------------------------------------------------------------------------
* hypre_ParMatmul_RowSizes:
*
 * Computes sizes of C rows. Formerly part of hypre_ParMatmul, but split out
 * so it can also be used for multiplication of Boolean matrices.
 *
 * Arrays computed: C_diag_i, C_offd_i.
 *
 * Arrays needed: (14, all HYPRE_Int*)
 *       rownnz_A,
 *       A_diag_i, A_diag_j,
 *       A_offd_i, A_offd_j,
 *       B_diag_i, B_diag_j,
 *       B_offd_i, B_offd_j,
 *       B_ext_diag_i, B_ext_diag_j,
 *       B_ext_offd_i, B_ext_offd_j,
 *       map_B_to_C.
 *
 * Scalars computed: C_diag_size, C_offd_size.
 *
 * Scalars needed:
 *      num_rownnz_A, num_rows_diag_A, num_cols_offd_A, allsquare,
 *      num_cols_diag_B, num_cols_offd_B, num_cols_offd_C
*--------------------------------------------------------------------------*/
void
hypre_ParMatmul_RowSizes( HYPRE_MemoryLocation memory_location,
HYPRE_Int **C_diag_i,
HYPRE_Int **C_offd_i,
HYPRE_Int *rownnz_A,
HYPRE_Int *A_diag_i,
HYPRE_Int *A_diag_j,
HYPRE_Int *A_offd_i,
HYPRE_Int *A_offd_j,
HYPRE_Int *B_diag_i,
HYPRE_Int *B_diag_j,
HYPRE_Int *B_offd_i,
HYPRE_Int *B_offd_j,
HYPRE_Int *B_ext_diag_i,
HYPRE_Int *B_ext_diag_j,
HYPRE_Int *B_ext_offd_i,
HYPRE_Int *B_ext_offd_j,
HYPRE_Int *map_B_to_C,
HYPRE_Int *C_diag_size,
HYPRE_Int *C_offd_size,
HYPRE_Int num_rownnz_A,
HYPRE_Int num_rows_diag_A,
HYPRE_Int num_cols_offd_A,
HYPRE_Int allsquare,
HYPRE_Int num_cols_diag_B,
HYPRE_Int num_cols_offd_B,
HYPRE_Int num_cols_offd_C )
{
HYPRE_Int *jj_count_diag_array;
HYPRE_Int *jj_count_offd_array;
HYPRE_Int start_indexing = 0; /* start indexing for C_data at 0 */
HYPRE_Int num_threads = hypre_NumThreads();
*C_diag_i = hypre_CTAlloc(HYPRE_Int, num_rows_diag_A+1, memory_location);
*C_offd_i = hypre_CTAlloc(HYPRE_Int, num_rows_diag_A+1, memory_location);
jj_count_diag_array = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
jj_count_offd_array = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
/*-----------------------------------------------------------------------
* Loop over rows of A
*-----------------------------------------------------------------------*/
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel
#endif
{
HYPRE_Int *B_marker = NULL;
HYPRE_Int jj_row_begin_diag, jj_count_diag;
HYPRE_Int jj_row_begin_offd, jj_count_offd;
HYPRE_Int i1, ii1, i2, i3, jj2, jj3;
HYPRE_Int size, rest, num_threads;
HYPRE_Int ii, ns, ne;
num_threads = hypre_NumActiveThreads();
size = num_rownnz_A/num_threads;
rest = num_rownnz_A - size*num_threads;
ii = hypre_GetThreadNum();
if (ii < rest)
{
ns = ii*size+ii;
ne = (ii+1)*size+ii+1;
}
else
{
ns = ii*size+rest;
ne = (ii+1)*size+rest;
}
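      /* Worked example of the partition above (assumed sizes): with
         num_rownnz_A = 10 and num_threads = 3, size = 3 and rest = 1, so
         thread 0 gets rows [0,4) while threads 1 and 2 get [4,7) and [7,10);
         the first 'rest' threads each take one extra row. */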
jj_count_diag = start_indexing;
jj_count_offd = start_indexing;
if (num_cols_diag_B || num_cols_offd_C)
{
B_marker = hypre_CTAlloc(HYPRE_Int, num_cols_diag_B + num_cols_offd_C, HYPRE_MEMORY_HOST);
}
for (i1 = 0; i1 < num_cols_diag_B + num_cols_offd_C; i1++)
{
B_marker[i1] = -1;
}
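      /* B_marker acts as a sparse accumulator for the symbolic product: entry
         i3 records the position in C at which column i3 was first seen for
         the current row. Because the jj counters only grow, a marker left
         over from an earlier row is always < jj_row_begin_* of the current
         row, so the marker array never needs resetting between rows. */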
for (i1 = ns; i1 < ne; i1++)
{
jj_row_begin_diag = jj_count_diag;
jj_row_begin_offd = jj_count_offd;
if (rownnz_A)
{
ii1 = rownnz_A[i1];
}
else
{
ii1 = i1;
/*--------------------------------------------------------------------
* Set marker for diagonal entry, C_{i1,i1} (for square matrices).
*--------------------------------------------------------------------*/
if (allsquare)
{
B_marker[i1] = jj_count_diag;
jj_count_diag++;
}
}
/*-----------------------------------------------------------------
* Loop over entries in row ii1 of A_offd.
*-----------------------------------------------------------------*/
if (num_cols_offd_A)
{
for (jj2 = A_offd_i[ii1]; jj2 < A_offd_i[ii1+1]; jj2++)
{
i2 = A_offd_j[jj2];
/*-----------------------------------------------------------
* Loop over entries in row i2 of B_ext.
*-----------------------------------------------------------*/
for (jj3 = B_ext_offd_i[i2]; jj3 < B_ext_offd_i[i2+1]; jj3++)
{
i3 = num_cols_diag_B+B_ext_offd_j[jj3];
/*--------------------------------------------------------
* Check B_marker to see that C_{ii1,i3} has not already
* been accounted for. If it has not, mark it and increment
* counter.
*--------------------------------------------------------*/
if (B_marker[i3] < jj_row_begin_offd)
{
B_marker[i3] = jj_count_offd;
jj_count_offd++;
}
}
for (jj3 = B_ext_diag_i[i2]; jj3 < B_ext_diag_i[i2+1]; jj3++)
{
i3 = B_ext_diag_j[jj3];
if (B_marker[i3] < jj_row_begin_diag)
{
B_marker[i3] = jj_count_diag;
jj_count_diag++;
}
}
}
}
/*-----------------------------------------------------------------
* Loop over entries in row ii1 of A_diag.
*-----------------------------------------------------------------*/
for (jj2 = A_diag_i[ii1]; jj2 < A_diag_i[ii1+1]; jj2++)
{
i2 = A_diag_j[jj2];
/*-----------------------------------------------------------
* Loop over entries in row i2 of B_diag.
*-----------------------------------------------------------*/
for (jj3 = B_diag_i[i2]; jj3 < B_diag_i[i2+1]; jj3++)
{
i3 = B_diag_j[jj3];
/*--------------------------------------------------------
* Check B_marker to see that C_{ii1,i3} has not already
* been accounted for. If it has not, mark it and increment
* counter.
*--------------------------------------------------------*/
if (B_marker[i3] < jj_row_begin_diag)
{
B_marker[i3] = jj_count_diag;
jj_count_diag++;
}
}
/*-----------------------------------------------------------
* Loop over entries in row i2 of B_offd.
*-----------------------------------------------------------*/
if (num_cols_offd_B)
{
for (jj3 = B_offd_i[i2]; jj3 < B_offd_i[i2+1]; jj3++)
{
i3 = num_cols_diag_B+map_B_to_C[B_offd_j[jj3]];
/*--------------------------------------------------------
* Check B_marker to see that C_{ii1,i3} has not already
* been accounted for. If it has not, mark it and increment
* counter.
*--------------------------------------------------------*/
if (B_marker[i3] < jj_row_begin_offd)
{
B_marker[i3] = jj_count_offd;
jj_count_offd++;
}
}
}
}
/*--------------------------------------------------------------------
* Set C_diag_i and C_offd_i for this row.
*--------------------------------------------------------------------*/
(*C_diag_i)[ii1] = jj_row_begin_diag;
(*C_offd_i)[ii1] = jj_row_begin_offd;
}
jj_count_diag_array[ii] = jj_count_diag;
jj_count_offd_array[ii] = jj_count_offd;
hypre_TFree(B_marker, HYPRE_MEMORY_HOST);
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
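   /* Each thread counted its rows independently, so the row offsets written
      above are thread-local. After the barrier, the per-thread totals in
      jj_count_{diag,offd}_array are visible to all threads, and the two
      correction phases below turn the local offsets into global ones. */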
/* Correct diag_i and offd_i - phase 1 */
if (ii)
{
jj_count_diag = jj_count_diag_array[0];
jj_count_offd = jj_count_offd_array[0];
for (i1 = 1; i1 < ii; i1++)
{
jj_count_diag += jj_count_diag_array[i1];
jj_count_offd += jj_count_offd_array[i1];
}
for (i1 = ns; i1 < ne; i1++)
{
ii1 = rownnz_A ? rownnz_A[i1] : i1;
(*C_diag_i)[ii1] += jj_count_diag;
(*C_offd_i)[ii1] += jj_count_offd;
}
}
else
{
(*C_diag_i)[num_rows_diag_A] = 0;
(*C_offd_i)[num_rows_diag_A] = 0;
for (i1 = 0; i1 < num_threads; i1++)
{
(*C_diag_i)[num_rows_diag_A] += jj_count_diag_array[i1];
(*C_offd_i)[num_rows_diag_A] += jj_count_offd_array[i1];
}
}
/* Correct diag_i and offd_i - phase 2 */
if (rownnz_A != NULL)
{
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
for (i1 = ns; i1 < (ne-1); i1++)
{
for (ii1 = rownnz_A[i1] + 1; ii1 < rownnz_A[i1+1]; ii1++)
{
(*C_diag_i)[ii1] = (*C_diag_i)[rownnz_A[i1+1]];
(*C_offd_i)[ii1] = (*C_offd_i)[rownnz_A[i1+1]];
}
}
if (ii < (num_threads - 1))
{
for (ii1 = rownnz_A[ne-1] + 1; ii1 < rownnz_A[ne]; ii1++)
{
(*C_diag_i)[ii1] = (*C_diag_i)[rownnz_A[ne]];
(*C_offd_i)[ii1] = (*C_offd_i)[rownnz_A[ne]];
}
}
else
{
for (ii1 = rownnz_A[ne-1] + 1; ii1 < num_rows_diag_A; ii1++)
{
(*C_diag_i)[ii1] = (*C_diag_i)[num_rows_diag_A];
(*C_offd_i)[ii1] = (*C_offd_i)[num_rows_diag_A];
}
}
}
} /* end parallel loop */
*C_diag_size = (*C_diag_i)[num_rows_diag_A];
*C_offd_size = (*C_offd_i)[num_rows_diag_A];
#ifdef HYPRE_DEBUG
HYPRE_Int i;
for (i = 0; i < num_rows_diag_A; i++)
{
hypre_assert((*C_diag_i)[i] <= (*C_diag_i)[i+1]);
hypre_assert((*C_offd_i)[i] <= (*C_offd_i)[i+1]);
}
#endif
hypre_TFree(jj_count_diag_array, HYPRE_MEMORY_HOST);
hypre_TFree(jj_count_offd_array, HYPRE_MEMORY_HOST);
/* End of First Pass */
}
/*--------------------------------------------------------------------------
* hypre_ParMatmul:
*
* Multiplies two ParCSRMatrices A and B and returns the product in
* ParCSRMatrix C.
*
* Note: C does not own the partitionings since its row_starts
* is owned by A and col_starts by B.
*--------------------------------------------------------------------------*/
hypre_ParCSRMatrix*
hypre_ParMatmul( hypre_ParCSRMatrix *A,
hypre_ParCSRMatrix *B )
{
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_MATMUL] -= hypre_MPI_Wtime();
#endif
/* ParCSRMatrix A */
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
HYPRE_BigInt nrows_A = hypre_ParCSRMatrixGlobalNumRows(A);
HYPRE_BigInt ncols_A = hypre_ParCSRMatrixGlobalNumCols(A);
HYPRE_BigInt *row_starts_A = hypre_ParCSRMatrixRowStarts(A);
HYPRE_Int num_rownnz_A;
HYPRE_Int *rownnz_A = NULL;
/* ParCSRMatrix B */
HYPRE_BigInt nrows_B = hypre_ParCSRMatrixGlobalNumRows(B);
HYPRE_BigInt ncols_B = hypre_ParCSRMatrixGlobalNumCols(B);
HYPRE_BigInt first_col_diag_B = hypre_ParCSRMatrixFirstColDiag(B);
HYPRE_BigInt *col_starts_B = hypre_ParCSRMatrixColStarts(B);
HYPRE_BigInt last_col_diag_B;
/* A_diag */
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
HYPRE_Complex *A_diag_data = hypre_CSRMatrixData(A_diag);
HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
HYPRE_Int *A_diag_ir = hypre_CSRMatrixRownnz(A_diag);
HYPRE_Int num_rownnz_diag_A = hypre_CSRMatrixNumRownnz(A_diag);
HYPRE_Int num_rows_diag_A = hypre_CSRMatrixNumRows(A_diag);
HYPRE_Int num_cols_diag_A = hypre_CSRMatrixNumCols(A_diag);
/* A_offd */
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
HYPRE_Complex *A_offd_data = hypre_CSRMatrixData(A_offd);
HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
HYPRE_Int *A_offd_ir = hypre_CSRMatrixRownnz(A_offd);
HYPRE_Int num_rownnz_offd_A = hypre_CSRMatrixNumRownnz(A_offd);
HYPRE_Int num_rows_offd_A = hypre_CSRMatrixNumRows(A_offd);
HYPRE_Int num_cols_offd_A = hypre_CSRMatrixNumCols(A_offd);
/* B_diag */
hypre_CSRMatrix *B_diag = hypre_ParCSRMatrixDiag(B);
HYPRE_Complex *B_diag_data = hypre_CSRMatrixData(B_diag);
HYPRE_Int *B_diag_i = hypre_CSRMatrixI(B_diag);
HYPRE_Int *B_diag_j = hypre_CSRMatrixJ(B_diag);
HYPRE_Int num_rows_diag_B = hypre_CSRMatrixNumRows(B_diag);
HYPRE_Int num_cols_diag_B = hypre_CSRMatrixNumCols(B_diag);
/* B_offd */
hypre_CSRMatrix *B_offd = hypre_ParCSRMatrixOffd(B);
HYPRE_BigInt *col_map_offd_B = hypre_ParCSRMatrixColMapOffd(B);
HYPRE_Complex *B_offd_data = hypre_CSRMatrixData(B_offd);
HYPRE_Int *B_offd_i = hypre_CSRMatrixI(B_offd);
HYPRE_Int *B_offd_j = hypre_CSRMatrixJ(B_offd);
HYPRE_Int num_cols_offd_B = hypre_CSRMatrixNumCols(B_offd);
/* ParCSRMatrix C */
hypre_ParCSRMatrix *C;
HYPRE_BigInt *col_map_offd_C;
HYPRE_Int *map_B_to_C = NULL;
/* C_diag */
hypre_CSRMatrix *C_diag;
HYPRE_Complex *C_diag_data;
HYPRE_Int *C_diag_i;
HYPRE_Int *C_diag_j;
HYPRE_Int C_offd_size;
HYPRE_Int num_cols_offd_C = 0;
/* C_offd */
hypre_CSRMatrix *C_offd;
HYPRE_Complex *C_offd_data = NULL;
HYPRE_Int *C_offd_i = NULL;
HYPRE_Int *C_offd_j = NULL;
HYPRE_Int C_diag_size;
/* Bs_ext */
hypre_CSRMatrix *Bs_ext;
HYPRE_Complex *Bs_ext_data;
HYPRE_Int *Bs_ext_i;
HYPRE_BigInt *Bs_ext_j;
HYPRE_Complex *B_ext_diag_data;
HYPRE_Int *B_ext_diag_i;
HYPRE_Int *B_ext_diag_j;
HYPRE_Int B_ext_diag_size;
HYPRE_Complex *B_ext_offd_data;
HYPRE_Int *B_ext_offd_i;
HYPRE_Int *B_ext_offd_j;
HYPRE_BigInt *B_big_offd_j = NULL;
HYPRE_Int B_ext_offd_size;
HYPRE_Int allsquare = 0;
HYPRE_Int num_procs;
HYPRE_Int *my_diag_array;
HYPRE_Int *my_offd_array;
HYPRE_Int max_num_threads;
HYPRE_Complex zero = 0.0;
HYPRE_MemoryLocation memory_location_A = hypre_ParCSRMatrixMemoryLocation(A);
HYPRE_MemoryLocation memory_location_B = hypre_ParCSRMatrixMemoryLocation(B);
/* RL: TODO cannot guarantee, maybe should never assert
hypre_assert(memory_location_A == memory_location_B);
*/
/* RL: in the case of A=H, B=D, or A=D, B=H, let C = D,
* not sure if this is the right thing to do.
* Also, need something like this in other places
* TODO */
HYPRE_MemoryLocation memory_location_C = hypre_max(memory_location_A, memory_location_B);
max_num_threads = hypre_NumThreads();
my_diag_array = hypre_CTAlloc(HYPRE_Int, max_num_threads, HYPRE_MEMORY_HOST);
my_offd_array = hypre_CTAlloc(HYPRE_Int, max_num_threads, HYPRE_MEMORY_HOST);
if (ncols_A != nrows_B || num_cols_diag_A != num_rows_diag_B)
{
hypre_error_w_msg(HYPRE_ERROR_GENERIC," Error! Incompatible matrix dimensions!\n");
return NULL;
}
/* if C=A*B is square globally and locally, then C_diag should be square also */
if ( num_rows_diag_A == num_cols_diag_B && nrows_A == ncols_B )
{
allsquare = 1;
}
/* Set rownnz of A */
if (num_rownnz_diag_A != num_rows_diag_A &&
num_rownnz_offd_A != num_rows_offd_A )
{
hypre_MergeOrderedArrays(num_rownnz_diag_A, A_diag_ir,
num_rownnz_offd_A, A_offd_ir,
&num_rownnz_A, &rownnz_A);
}
else
{
num_rownnz_A = hypre_max(num_rows_diag_A, num_rows_offd_A);
}
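   /* When both the diag and offd parts contain empty rows, rownnz_A holds the
      sorted union of rows that have a nonzero in either part, and the passes
      below visit only those rows; otherwise rownnz_A stays NULL and every row
      is visited. */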
/*-----------------------------------------------------------------------
* Extract B_ext, i.e. portion of B that is stored on neighbor procs
* and needed locally for matrix matrix product
*-----------------------------------------------------------------------*/
hypre_MPI_Comm_size(comm, &num_procs);
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_RENUMBER_COLIDX] -= hypre_MPI_Wtime();
#endif
if (num_procs > 1)
{
/*---------------------------------------------------------------------
* If there exists no CommPkg for A, a CommPkg is generated using
* equally load balanced partitionings within
* hypre_ParCSRMatrixExtractBExt
*--------------------------------------------------------------------*/
Bs_ext = hypre_ParCSRMatrixExtractBExt(B,A,1);
Bs_ext_data = hypre_CSRMatrixData(Bs_ext);
Bs_ext_i = hypre_CSRMatrixI(Bs_ext);
Bs_ext_j = hypre_CSRMatrixBigJ(Bs_ext);
}
B_ext_diag_i = hypre_CTAlloc(HYPRE_Int, num_cols_offd_A+1, HYPRE_MEMORY_HOST);
B_ext_offd_i = hypre_CTAlloc(HYPRE_Int, num_cols_offd_A+1, HYPRE_MEMORY_HOST);
B_ext_diag_size = 0;
B_ext_offd_size = 0;
last_col_diag_B = first_col_diag_B + (HYPRE_BigInt) num_cols_diag_B - 1;
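   /* A column index of Bs_ext belongs to the diagonal part iff it lies in
      [first_col_diag_B, last_col_diag_B], i.e. in this rank's owned column
      range; all other columns go to the off-diagonal part. */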
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
hypre_UnorderedBigIntSet set;
#pragma omp parallel
{
HYPRE_Int size, rest, ii;
HYPRE_Int ns, ne;
HYPRE_Int i1, i, j;
HYPRE_Int my_offd_size, my_diag_size;
HYPRE_Int cnt_offd, cnt_diag;
HYPRE_Int num_threads = hypre_NumActiveThreads();
size = num_cols_offd_A/num_threads;
rest = num_cols_offd_A - size*num_threads;
ii = hypre_GetThreadNum();
if (ii < rest)
{
ns = ii*size+ii;
ne = (ii+1)*size+ii+1;
}
else
{
ns = ii*size+rest;
ne = (ii+1)*size+rest;
}
my_diag_size = 0;
my_offd_size = 0;
for (i = ns; i < ne; i++)
{
B_ext_diag_i[i] = my_diag_size;
B_ext_offd_i[i] = my_offd_size;
for (j = Bs_ext_i[i]; j < Bs_ext_i[i+1]; j++)
{
if (Bs_ext_j[j] < first_col_diag_B ||
Bs_ext_j[j] > last_col_diag_B)
{
my_offd_size++;
}
else
{
my_diag_size++;
}
}
}
my_diag_array[ii] = my_diag_size;
my_offd_array[ii] = my_offd_size;
#pragma omp barrier
if (ii)
{
my_diag_size = my_diag_array[0];
my_offd_size = my_offd_array[0];
for (i1 = 1; i1 < ii; i1++)
{
my_diag_size += my_diag_array[i1];
my_offd_size += my_offd_array[i1];
}
for (i1 = ns; i1 < ne; i1++)
{
B_ext_diag_i[i1] += my_diag_size;
B_ext_offd_i[i1] += my_offd_size;
}
}
else
{
B_ext_diag_size = 0;
B_ext_offd_size = 0;
for (i1 = 0; i1 < num_threads; i1++)
{
B_ext_diag_size += my_diag_array[i1];
B_ext_offd_size += my_offd_array[i1];
}
B_ext_diag_i[num_cols_offd_A] = B_ext_diag_size;
B_ext_offd_i[num_cols_offd_A] = B_ext_offd_size;
if (B_ext_diag_size)
{
B_ext_diag_j = hypre_CTAlloc(HYPRE_Int, B_ext_diag_size, HYPRE_MEMORY_HOST);
B_ext_diag_data = hypre_CTAlloc(HYPRE_Complex, B_ext_diag_size, HYPRE_MEMORY_HOST);
}
if (B_ext_offd_size)
{
B_ext_offd_j = hypre_CTAlloc(HYPRE_Int, B_ext_offd_size, HYPRE_MEMORY_HOST);
B_big_offd_j = hypre_CTAlloc(HYPRE_BigInt, B_ext_offd_size, HYPRE_MEMORY_HOST);
B_ext_offd_data = hypre_CTAlloc(HYPRE_Complex, B_ext_offd_size, HYPRE_MEMORY_HOST);
}
hypre_UnorderedBigIntSetCreate(&set, B_ext_offd_size + num_cols_offd_B, 16*hypre_NumThreads());
}
#pragma omp barrier
cnt_offd = B_ext_offd_i[ns];
cnt_diag = B_ext_diag_i[ns];
for (i = ns; i < ne; i++)
{
for (j = Bs_ext_i[i]; j < Bs_ext_i[i+1]; j++)
{
if (Bs_ext_j[j] < first_col_diag_B ||
Bs_ext_j[j] > last_col_diag_B)
{
hypre_UnorderedBigIntSetPut(&set, Bs_ext_j[j]);
B_big_offd_j[cnt_offd] = Bs_ext_j[j];
//Bs_ext_j[cnt_offd] = Bs_ext_j[j];
B_ext_offd_data[cnt_offd++] = Bs_ext_data[j];
}
else
{
B_ext_diag_j[cnt_diag] = (HYPRE_Int)(Bs_ext_j[j] - first_col_diag_B);
B_ext_diag_data[cnt_diag++] = Bs_ext_data[j];
}
}
}
HYPRE_Int i_begin, i_end;
hypre_GetSimpleThreadPartition(&i_begin, &i_end, num_cols_offd_B);
for (i = i_begin; i < i_end; i++)
{
hypre_UnorderedBigIntSetPut(&set, col_map_offd_B[i]);
}
} /* omp parallel */
col_map_offd_C = hypre_UnorderedBigIntSetCopyToArray(&set, &num_cols_offd_C);
hypre_UnorderedBigIntSetDestroy(&set);
hypre_UnorderedBigIntMap col_map_offd_C_inverse;
hypre_big_sort_and_create_inverse_map(col_map_offd_C,
num_cols_offd_C,
&col_map_offd_C,
&col_map_offd_C_inverse);
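   /* col_map_offd_C now holds the sorted union of the external column indices
      of B_ext and col_map_offd_B, and col_map_offd_C_inverse maps a global
      column id to its position in that array, which the loop below uses to
      renumber B_ext_offd_j into local C column indices. */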
HYPRE_Int i, j;
#pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE
for (i = 0; i < num_cols_offd_A; i++)
{
for (j = B_ext_offd_i[i]; j < B_ext_offd_i[i+1]; j++)
{
//B_ext_offd_j[j] = hypre_UnorderedIntMapGet(&col_map_offd_C_inverse, B_ext_offd_j[j]);
B_ext_offd_j[j] = hypre_UnorderedBigIntMapGet(&col_map_offd_C_inverse, B_big_offd_j[j]);
}
}
if (num_cols_offd_C)
{
hypre_UnorderedBigIntMapDestroy(&col_map_offd_C_inverse);
}
hypre_TFree(my_diag_array, HYPRE_MEMORY_HOST);
hypre_TFree(my_offd_array, HYPRE_MEMORY_HOST);
if (num_cols_offd_B)
{
HYPRE_Int i;
map_B_to_C = hypre_CTAlloc(HYPRE_Int, num_cols_offd_B, HYPRE_MEMORY_HOST);
#pragma omp parallel private(i)
{
HYPRE_Int i_begin, i_end;
hypre_GetSimpleThreadPartition(&i_begin, &i_end, num_cols_offd_C);
HYPRE_Int cnt;
if (i_end > i_begin)
{
cnt = hypre_BigLowerBound(col_map_offd_B,
col_map_offd_B + (HYPRE_BigInt)num_cols_offd_B,
col_map_offd_C[i_begin]) - col_map_offd_B;
}
for (i = i_begin; i < i_end && cnt < num_cols_offd_B; i++)
{
if (col_map_offd_C[i] == col_map_offd_B[cnt])
{
map_B_to_C[cnt++] = i;
}
}
}
}
if (num_procs > 1)
{
hypre_CSRMatrixDestroy(Bs_ext);
Bs_ext = NULL;
}
#else /* !HYPRE_CONCURRENT_HOPSCOTCH */
HYPRE_BigInt *temp;
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel
#endif
{
HYPRE_Int size, rest, ii;
HYPRE_Int ns, ne;
HYPRE_Int i1, i, j;
HYPRE_Int my_offd_size, my_diag_size;
HYPRE_Int cnt_offd, cnt_diag;
HYPRE_Int num_threads = hypre_NumActiveThreads();
size = num_cols_offd_A/num_threads;
rest = num_cols_offd_A - size*num_threads;
ii = hypre_GetThreadNum();
if (ii < rest)
{
ns = ii*size+ii;
ne = (ii+1)*size+ii+1;
}
else
{
ns = ii*size+rest;
ne = (ii+1)*size+rest;
}
my_diag_size = 0;
my_offd_size = 0;
for (i = ns; i < ne; i++)
{
B_ext_diag_i[i] = my_diag_size;
B_ext_offd_i[i] = my_offd_size;
for (j = Bs_ext_i[i]; j < Bs_ext_i[i+1]; j++)
{
if (Bs_ext_j[j] < first_col_diag_B ||
Bs_ext_j[j] > last_col_diag_B)
{
my_offd_size++;
}
else
{
my_diag_size++;
}
}
}
my_diag_array[ii] = my_diag_size;
my_offd_array[ii] = my_offd_size;
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
if (ii)
{
my_diag_size = my_diag_array[0];
my_offd_size = my_offd_array[0];
for (i1 = 1; i1 < ii; i1++)
{
my_diag_size += my_diag_array[i1];
my_offd_size += my_offd_array[i1];
}
for (i1 = ns; i1 < ne; i1++)
{
B_ext_diag_i[i1] += my_diag_size;
B_ext_offd_i[i1] += my_offd_size;
}
}
else
{
B_ext_diag_size = 0;
B_ext_offd_size = 0;
for (i1 = 0; i1 < num_threads; i1++)
{
B_ext_diag_size += my_diag_array[i1];
B_ext_offd_size += my_offd_array[i1];
}
B_ext_diag_i[num_cols_offd_A] = B_ext_diag_size;
B_ext_offd_i[num_cols_offd_A] = B_ext_offd_size;
if (B_ext_diag_size)
{
B_ext_diag_j = hypre_CTAlloc(HYPRE_Int, B_ext_diag_size, HYPRE_MEMORY_HOST);
B_ext_diag_data = hypre_CTAlloc(HYPRE_Complex, B_ext_diag_size, HYPRE_MEMORY_HOST);
}
if (B_ext_offd_size)
{
B_ext_offd_j = hypre_CTAlloc(HYPRE_Int, B_ext_offd_size, HYPRE_MEMORY_HOST);
B_big_offd_j = hypre_CTAlloc(HYPRE_BigInt, B_ext_offd_size, HYPRE_MEMORY_HOST);
B_ext_offd_data = hypre_CTAlloc(HYPRE_Complex, B_ext_offd_size, HYPRE_MEMORY_HOST);
}
if (B_ext_offd_size || num_cols_offd_B)
{
temp = hypre_CTAlloc(HYPRE_BigInt, B_ext_offd_size+num_cols_offd_B, HYPRE_MEMORY_HOST);
}
}
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
cnt_offd = B_ext_offd_i[ns];
cnt_diag = B_ext_diag_i[ns];
for (i = ns; i < ne; i++)
{
for (j = Bs_ext_i[i]; j < Bs_ext_i[i+1]; j++)
{
if (Bs_ext_j[j] < first_col_diag_B ||
Bs_ext_j[j] > last_col_diag_B)
{
temp[cnt_offd] = Bs_ext_j[j];
B_big_offd_j[cnt_offd] = Bs_ext_j[j];
//Bs_ext_j[cnt_offd] = Bs_ext_j[j];
B_ext_offd_data[cnt_offd++] = Bs_ext_data[j];
}
else
{
B_ext_diag_j[cnt_diag] = (HYPRE_Int)(Bs_ext_j[j] - first_col_diag_B);
B_ext_diag_data[cnt_diag++] = Bs_ext_data[j];
}
}
}
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
if (ii == 0)
{
HYPRE_Int cnt;
if (num_procs > 1)
{
hypre_CSRMatrixDestroy(Bs_ext);
Bs_ext = NULL;
}
cnt = 0;
if (B_ext_offd_size || num_cols_offd_B)
{
cnt = B_ext_offd_size;
for (i = 0; i < num_cols_offd_B; i++)
{
temp[cnt++] = col_map_offd_B[i];
}
if (cnt)
{
HYPRE_BigInt value;
hypre_BigQsort0(temp, 0, cnt-1);
num_cols_offd_C = 1;
value = temp[0];
for (i = 1; i < cnt; i++)
{
if (temp[i] > value)
{
value = temp[i];
temp[num_cols_offd_C++] = value;
}
}
}
if (num_cols_offd_C)
{
col_map_offd_C = hypre_CTAlloc(HYPRE_BigInt, num_cols_offd_C, HYPRE_MEMORY_HOST);
}
for (i = 0; i < num_cols_offd_C; i++)
{
col_map_offd_C[i] = temp[i];
}
hypre_TFree(temp, HYPRE_MEMORY_HOST);
}
}
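   /* Thread 0 has now built col_map_offd_C sequentially: temp collected the
      external column ids of B_ext plus col_map_offd_B, hypre_BigQsort0 sorted
      them, and the scan kept one copy of each distinct value. After the
      barrier, all threads renumber their slice of B_ext_offd_j against this
      sorted map via binary search. */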
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
for (i = ns; i < ne; i++)
{
for (j = B_ext_offd_i[i]; j < B_ext_offd_i[i+1]; j++)
{
B_ext_offd_j[j] = hypre_BigBinarySearch(col_map_offd_C, B_big_offd_j[j],
//B_ext_offd_j[j] = hypre_BigBinarySearch(col_map_offd_C, Bs_ext_j[j],
num_cols_offd_C);
}
}
} /* end parallel region */
hypre_TFree(B_big_offd_j, HYPRE_MEMORY_HOST);
hypre_TFree(my_diag_array, HYPRE_MEMORY_HOST);
hypre_TFree(my_offd_array, HYPRE_MEMORY_HOST);
if (num_cols_offd_B)
{
HYPRE_Int i, cnt;
map_B_to_C = hypre_CTAlloc(HYPRE_Int, num_cols_offd_B, HYPRE_MEMORY_HOST);
cnt = 0;
for (i = 0; i < num_cols_offd_C; i++)
{
if (col_map_offd_C[i] == col_map_offd_B[cnt])
{
map_B_to_C[cnt++] = i;
if (cnt == num_cols_offd_B) break;
}
}
}
#endif /* !HYPRE_CONCURRENT_HOPSCOTCH */
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_RENUMBER_COLIDX] += hypre_MPI_Wtime();
#endif
hypre_ParMatmul_RowSizes(memory_location_C, &C_diag_i, &C_offd_i,
rownnz_A, A_diag_i, A_diag_j,
A_offd_i, A_offd_j,
B_diag_i, B_diag_j,
B_offd_i, B_offd_j,
B_ext_diag_i, B_ext_diag_j,
B_ext_offd_i, B_ext_offd_j, map_B_to_C,
&C_diag_size, &C_offd_size,
num_rownnz_A, num_rows_diag_A, num_cols_offd_A,
allsquare, num_cols_diag_B, num_cols_offd_B,
num_cols_offd_C);
/*-----------------------------------------------------------------------
* Allocate C_diag_data and C_diag_j arrays.
* Allocate C_offd_data and C_offd_j arrays.
*-----------------------------------------------------------------------*/
last_col_diag_B = first_col_diag_B + (HYPRE_BigInt)num_cols_diag_B - 1;
C_diag_data = hypre_CTAlloc(HYPRE_Complex, C_diag_size, memory_location_C);
C_diag_j = hypre_CTAlloc(HYPRE_Int, C_diag_size, memory_location_C);
if (C_offd_size)
{
C_offd_data = hypre_CTAlloc(HYPRE_Complex, C_offd_size, memory_location_C);
C_offd_j = hypre_CTAlloc(HYPRE_Int, C_offd_size, memory_location_C);
}
/*-----------------------------------------------------------------------
* Second Pass: Fill in C_diag_data and C_diag_j.
* Second Pass: Fill in C_offd_data and C_offd_j.
*-----------------------------------------------------------------------*/
/*-----------------------------------------------------------------------
* Initialize some stuff.
*-----------------------------------------------------------------------*/
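   /* The numeric pass below mirrors the symbolic pass in
      hypre_ParMatmul_RowSizes, but B_marker now stores each column's position
      inside C's value arrays, so repeated contributions a_entry * b are
      accumulated in place rather than merely counted. */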
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel
#endif
{
HYPRE_Int *B_marker = NULL;
HYPRE_Int ns, ne, size, rest, ii;
HYPRE_Int i1, ii1, i2, i3, jj2, jj3;
HYPRE_Int jj_row_begin_diag, jj_count_diag;
HYPRE_Int jj_row_begin_offd, jj_count_offd;
HYPRE_Int num_threads;
HYPRE_Complex a_entry; /*, a_b_product;*/
num_threads = hypre_NumActiveThreads();
size = num_rownnz_A/num_threads;
rest = num_rownnz_A - size*num_threads;
ii = hypre_GetThreadNum();
if (ii < rest)
{
ns = ii*size+ii;
ne = (ii+1)*size+ii+1;
}
else
{
ns = ii*size+rest;
ne = (ii+1)*size+rest;
}
jj_count_diag = C_diag_i[rownnz_A ? rownnz_A[ns] : ns];
jj_count_offd = C_offd_i[rownnz_A ? rownnz_A[ns] : ns];
if (num_cols_diag_B || num_cols_offd_C)
{
B_marker = hypre_CTAlloc(HYPRE_Int, num_cols_diag_B + num_cols_offd_C,
HYPRE_MEMORY_HOST);
for (i1 = 0; i1 < num_cols_diag_B + num_cols_offd_C; i1++)
{
B_marker[i1] = -1;
}
}
/*-----------------------------------------------------------------------
* Loop over interior c-points.
*-----------------------------------------------------------------------*/
for (i1 = ns; i1 < ne; i1++)
{
jj_row_begin_diag = jj_count_diag;
jj_row_begin_offd = jj_count_offd;
if (rownnz_A)
{
ii1 = rownnz_A[i1];
}
else
{
ii1 = i1;
/*--------------------------------------------------------------------
* Create diagonal entry, C_{i1,i1}
*--------------------------------------------------------------------*/
if (allsquare)
{
B_marker[i1] = jj_count_diag;
C_diag_data[jj_count_diag] = zero;
C_diag_j[jj_count_diag] = i1;
jj_count_diag++;
}
}
/*-----------------------------------------------------------------
* Loop over entries in row i1 of A_offd.
*-----------------------------------------------------------------*/
if (num_cols_offd_A)
{
for (jj2 = A_offd_i[ii1]; jj2 < A_offd_i[ii1+1]; jj2++)
{
i2 = A_offd_j[jj2];
a_entry = A_offd_data[jj2];
/*-----------------------------------------------------------
* Loop over entries in row i2 of B_ext.
*-----------------------------------------------------------*/
for (jj3 = B_ext_offd_i[i2]; jj3 < B_ext_offd_i[i2+1]; jj3++)
{
i3 = num_cols_diag_B+B_ext_offd_j[jj3];
/*--------------------------------------------------------
* Check B_marker to see that C_{ii1,i3} has not already
* been accounted for. If it has not, create a new entry.
* If it has, add new contribution.
*--------------------------------------------------------*/
if (B_marker[i3] < jj_row_begin_offd)
{
B_marker[i3] = jj_count_offd;
C_offd_data[jj_count_offd] = a_entry*B_ext_offd_data[jj3];
C_offd_j[jj_count_offd] = i3-num_cols_diag_B;
jj_count_offd++;
}
else
{
C_offd_data[B_marker[i3]] += a_entry*B_ext_offd_data[jj3];
}
}
for (jj3 = B_ext_diag_i[i2]; jj3 < B_ext_diag_i[i2+1]; jj3++)
{
i3 = B_ext_diag_j[jj3];
if (B_marker[i3] < jj_row_begin_diag)
{
B_marker[i3] = jj_count_diag;
C_diag_data[jj_count_diag] = a_entry*B_ext_diag_data[jj3];
C_diag_j[jj_count_diag] = i3;
jj_count_diag++;
}
else
{
C_diag_data[B_marker[i3]] += a_entry*B_ext_diag_data[jj3];
}
}
}
}
/*-----------------------------------------------------------------
* Loop over entries in row ii1 of A_diag.
*-----------------------------------------------------------------*/
for (jj2 = A_diag_i[ii1]; jj2 < A_diag_i[ii1+1]; jj2++)
{
i2 = A_diag_j[jj2];
a_entry = A_diag_data[jj2];
/*-----------------------------------------------------------
* Loop over entries in row i2 of B_diag.
*-----------------------------------------------------------*/
for (jj3 = B_diag_i[i2]; jj3 < B_diag_i[i2+1]; jj3++)
{
i3 = B_diag_j[jj3];
/*--------------------------------------------------------
* Check B_marker to see that C_{ii1,i3} has not already
* been accounted for. If it has not, create a new entry.
* If it has, add new contribution.
*--------------------------------------------------------*/
if (B_marker[i3] < jj_row_begin_diag)
{
B_marker[i3] = jj_count_diag;
C_diag_data[jj_count_diag] = a_entry*B_diag_data[jj3];
C_diag_j[jj_count_diag] = i3;
jj_count_diag++;
}
else
{
C_diag_data[B_marker[i3]] += a_entry*B_diag_data[jj3];
}
}
if (num_cols_offd_B)
{
for (jj3 = B_offd_i[i2]; jj3 < B_offd_i[i2+1]; jj3++)
{
i3 = num_cols_diag_B+map_B_to_C[B_offd_j[jj3]];
/*--------------------------------------------------------
* Check B_marker to see that C_{ii1,i3} has not already
* been accounted for. If it has not, create a new entry.
* If it has, add new contribution.
*--------------------------------------------------------*/
if (B_marker[i3] < jj_row_begin_offd)
{
B_marker[i3] = jj_count_offd;
C_offd_data[jj_count_offd] = a_entry*B_offd_data[jj3];
C_offd_j[jj_count_offd] = i3-num_cols_diag_B;
jj_count_offd++;
}
else
{
C_offd_data[B_marker[i3]] += a_entry*B_offd_data[jj3];
}
}
}
}
}
hypre_TFree(B_marker, HYPRE_MEMORY_HOST);
} /*end parallel region */
C = hypre_ParCSRMatrixCreate(comm, nrows_A, ncols_B, row_starts_A,
col_starts_B, num_cols_offd_C,
C_diag_size, C_offd_size);
/* Note that C does not own the partitionings */
hypre_ParCSRMatrixSetRowStartsOwner(C, 0);
hypre_ParCSRMatrixSetColStartsOwner(C, 0);
C_diag = hypre_ParCSRMatrixDiag(C);
hypre_CSRMatrixData(C_diag) = C_diag_data;
hypre_CSRMatrixI(C_diag) = C_diag_i;
hypre_CSRMatrixJ(C_diag) = C_diag_j;
hypre_CSRMatrixSetRownnz(C_diag);
C_offd = hypre_ParCSRMatrixOffd(C);
hypre_CSRMatrixI(C_offd) = C_offd_i;
hypre_ParCSRMatrixOffd(C) = C_offd;
if (num_cols_offd_C)
{
hypre_CSRMatrixData(C_offd) = C_offd_data;
hypre_CSRMatrixJ(C_offd) = C_offd_j;
hypre_ParCSRMatrixColMapOffd(C) = col_map_offd_C;
}
hypre_CSRMatrixSetRownnz(C_offd);
hypre_CSRMatrixMemoryLocation(C_diag) = memory_location_C;
hypre_CSRMatrixMemoryLocation(C_offd) = memory_location_C;
/*-----------------------------------------------------------------------
* Free various arrays
*-----------------------------------------------------------------------*/
hypre_TFree(B_ext_diag_i, HYPRE_MEMORY_HOST);
if (B_ext_diag_size)
{
hypre_TFree(B_ext_diag_j, HYPRE_MEMORY_HOST);
hypre_TFree(B_ext_diag_data, HYPRE_MEMORY_HOST);
}
hypre_TFree(B_ext_offd_i, HYPRE_MEMORY_HOST);
if (B_ext_offd_size)
{
hypre_TFree(B_ext_offd_j, HYPRE_MEMORY_HOST);
hypre_TFree(B_ext_offd_data, HYPRE_MEMORY_HOST);
}
if (num_cols_offd_B)
{
hypre_TFree(map_B_to_C, HYPRE_MEMORY_HOST);
}
hypre_TFree(rownnz_A, HYPRE_MEMORY_HOST);
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_MATMUL] += hypre_MPI_Wtime();
#endif
return C;
}
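/* Minimal usage sketch (assumed setup; A and B must satisfy the dimension
   check above):

      hypre_ParCSRMatrix *C = hypre_ParMatmul(A, B);
      ...
      hypre_ParCSRMatrixDestroy(C);

   Note that C shares row_starts with A and col_starts with B (see the
   SetRowStartsOwner/SetColStartsOwner calls above), so A and B must outlive
   any use of C's partitionings. */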
/* The following function was formerly part of hypre_ParCSRMatrixExtractBExt,
   but the code was split out so it can also be used by a corresponding
   function for Boolean matrices.
   JSP: to allow communication overlapping, it returns comm_handle_idx and
   comm_handle_data. Before accessing the extracted B data, both handles
   should be destroyed (including the send_data contained in each comm_handle).
*/
void hypre_ParCSRMatrixExtractBExt_Arrays_Overlap(
HYPRE_Int ** pB_ext_i,
HYPRE_BigInt ** pB_ext_j,
HYPRE_Complex ** pB_ext_data,
HYPRE_BigInt ** pB_ext_row_map,
HYPRE_Int * num_nonzeros,
HYPRE_Int data,
HYPRE_Int find_row_map,
MPI_Comm comm,
hypre_ParCSRCommPkg * comm_pkg,
HYPRE_Int num_cols_B,
HYPRE_Int num_recvs,
HYPRE_Int num_sends,
HYPRE_BigInt first_col_diag,
HYPRE_BigInt * row_starts,
HYPRE_Int * recv_vec_starts,
HYPRE_Int * send_map_starts,
HYPRE_Int * send_map_elmts,
HYPRE_Int * diag_i,
HYPRE_Int * diag_j,
HYPRE_Int * offd_i,
HYPRE_Int * offd_j,
HYPRE_BigInt * col_map_offd,
HYPRE_Real * diag_data,
HYPRE_Real * offd_data,
hypre_ParCSRCommHandle **comm_handle_idx,
hypre_ParCSRCommHandle **comm_handle_data,
HYPRE_Int *CF_marker, HYPRE_Int *CF_marker_offd,
HYPRE_Int skip_fine, /* 1 if only coarse points are needed */
HYPRE_Int skip_same_sign /* 1 if only points that have the same sign are needed */
 // extended-based long-range interpolation: skip_fine = 1, skip_same_sign = 0 for the S matrix; skip_fine = 1, skip_same_sign = 1 for the A matrix
 // other interpolation: skip_fine = 0, skip_same_sign = 0
)
{
hypre_ParCSRCommHandle *comm_handle, *row_map_comm_handle = NULL;
hypre_ParCSRCommPkg *tmp_comm_pkg;
HYPRE_Int *B_int_i;
HYPRE_BigInt *B_int_j;
HYPRE_Int *B_ext_i;
HYPRE_BigInt * B_ext_j;
HYPRE_Complex * B_ext_data;
HYPRE_Complex * B_int_data;
HYPRE_BigInt * B_int_row_map;
HYPRE_BigInt * B_ext_row_map;
HYPRE_Int num_procs, my_id;
HYPRE_Int *jdata_recv_vec_starts;
HYPRE_Int *jdata_send_map_starts;
HYPRE_Int i, j, k;
HYPRE_Int start_index;
/*HYPRE_Int jrow;*/
HYPRE_Int num_rows_B_ext;
HYPRE_Int *prefix_sum_workspace;
hypre_MPI_Comm_size(comm,&num_procs);
hypre_MPI_Comm_rank(comm,&my_id);
HYPRE_BigInt first_row_index = row_starts[0];
num_rows_B_ext = recv_vec_starts[num_recvs];
if ( num_rows_B_ext < 0 ) { /* no B_ext, no communication */
*pB_ext_i = NULL;
*pB_ext_j = NULL;
if ( data ) *pB_ext_data = NULL;
if ( find_row_map ) *pB_ext_row_map = NULL;
*num_nonzeros = 0;
return;
};
B_int_i = hypre_CTAlloc(HYPRE_Int, send_map_starts[num_sends]+1, HYPRE_MEMORY_HOST);
B_ext_i = hypre_CTAlloc(HYPRE_Int, num_rows_B_ext+1, HYPRE_MEMORY_HOST);
*pB_ext_i = B_ext_i;
if ( find_row_map ) {
B_int_row_map = hypre_CTAlloc( HYPRE_BigInt, send_map_starts[num_sends]+1 , HYPRE_MEMORY_HOST);
B_ext_row_map = hypre_CTAlloc( HYPRE_BigInt, num_rows_B_ext+1 , HYPRE_MEMORY_HOST);
*pB_ext_row_map = B_ext_row_map;
};
/*--------------------------------------------------------------------------
 * Generate B_int_i by adding the numbers of row elements of diag and offd
 * for the corresponding rows: B_int_i[j+1] contains the number of elements
 * in row j (the row being determined through send_map_elmts).
 *--------------------------------------------------------------------------*/
jdata_send_map_starts = hypre_CTAlloc(HYPRE_Int, num_sends+1, HYPRE_MEMORY_HOST);
jdata_recv_vec_starts = hypre_CTAlloc(HYPRE_Int, num_recvs+1, HYPRE_MEMORY_HOST);
jdata_send_map_starts[0] = B_int_i[0] = 0;
/*HYPRE_Int prefix_sum_workspace[(hypre_NumThreads() + 1)*num_sends];*/
prefix_sum_workspace = hypre_TAlloc(HYPRE_Int, (hypre_NumThreads() + 1)*num_sends, HYPRE_MEMORY_HOST);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel private(i,j,k)
#endif
{
/*HYPRE_Int counts[num_sends];*/
HYPRE_Int *counts;
counts = hypre_TAlloc(HYPRE_Int, num_sends, HYPRE_MEMORY_HOST);
for (i=0; i < num_sends; i++)
{
HYPRE_Int j_begin, j_end;
hypre_GetSimpleThreadPartition(&j_begin, &j_end, send_map_starts[i + 1] - send_map_starts[i]);
j_begin += send_map_starts[i];
j_end += send_map_starts[i];
HYPRE_Int count = 0;
if (skip_fine && skip_same_sign)
{
for (j = j_begin; j < j_end; j++)
{
HYPRE_Int jrow = send_map_elmts[j];
HYPRE_Int len = 0;
if (diag_data[diag_i[jrow]] >= 0)
{
for (k = diag_i[jrow] + 1; k < diag_i[jrow + 1]; k++)
{
if (diag_data[k] < 0 && CF_marker[diag_j[k]] >= 0) len++;
}
for (k = offd_i[jrow]; k < offd_i[jrow + 1]; k++)
{
if (offd_data[k] < 0) len++;
}
}
else
{
for (k = diag_i[jrow] + 1; k < diag_i[jrow + 1]; k++)
{
if (diag_data[k] > 0 && CF_marker[diag_j[k]] >= 0) len++;
}
for (k = offd_i[jrow]; k < offd_i[jrow + 1]; k++)
{
if (offd_data[k] > 0) len++;
}
}
B_int_i[j + 1] = len;
count += len;
}
}
else if (skip_fine)
{
for (j = j_begin; j < j_end; j++)
{
HYPRE_Int jrow = send_map_elmts[j];
HYPRE_Int len = 0;
for (k = diag_i[jrow]; k < diag_i[jrow + 1]; k++)
{
if (CF_marker[diag_j[k]] >= 0) len++;
}
for (k = offd_i[jrow]; k < offd_i[jrow + 1]; k++)
{
if (CF_marker_offd[offd_j[k]] >= 0) len++;
}
B_int_i[j + 1] = len;
count += len;
}
}
else
{
for (j = j_begin; j < j_end; j++)
{
HYPRE_Int jrow = send_map_elmts[j];
HYPRE_Int len = diag_i[jrow + 1] - diag_i[jrow];
len += offd_i[jrow + 1] - offd_i[jrow];
B_int_i[j + 1] = len;
count += len;
}
}
if (find_row_map)
{
for (j = j_begin; j < j_end; j++)
{
HYPRE_Int jrow = send_map_elmts[j];
B_int_row_map[j] = (HYPRE_BigInt)jrow + first_row_index;
}
}
counts[i] = count;
}
hypre_prefix_sum_multiple(counts, jdata_send_map_starts + 1, num_sends, prefix_sum_workspace);
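      /* hypre_prefix_sum_multiple combines the per-thread counts across all
         num_sends targets: afterwards counts[i] is this thread's starting
         offset within send block i and jdata_send_map_starts[i+1] holds the
         per-block total, so each thread can later write its slice of
         B_int_j/B_int_data without synchronization. */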
#ifdef HYPRE_USING_OPENMP
#pragma omp master
#endif
{
for (i = 1; i < num_sends; i++)
{
jdata_send_map_starts[i + 1] += jdata_send_map_starts[i];
}
/*--------------------------------------------------------------------------
* initialize communication
*--------------------------------------------------------------------------*/
comm_handle = hypre_ParCSRCommHandleCreate(11,comm_pkg,
&B_int_i[1],&(B_ext_i[1]) );
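      /* Job-code convention of hypre_ParCSRCommHandleCreate as used in this
         file: 1/2 move HYPRE_Complex, 11/12 HYPRE_Int, 21/22 HYPRE_BigInt;
         odd codes communicate from the send_map side to the recv side and
         even codes go in the reverse direction. Code 11 above ships each
         interface row's length, so after the exchange B_ext_i[j+1] holds the
         length of external row j. */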
if ( find_row_map )
{
/* scatter/gather B_int row numbers to form array of B_ext row numbers */
row_map_comm_handle = hypre_ParCSRCommHandleCreate
(21,comm_pkg, B_int_row_map, B_ext_row_map );
}
B_int_j = hypre_TAlloc(HYPRE_BigInt, jdata_send_map_starts[num_sends], HYPRE_MEMORY_HOST);
if (data) B_int_data = hypre_TAlloc(HYPRE_Complex, jdata_send_map_starts[num_sends], HYPRE_MEMORY_HOST);
}
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
for (i = 0; i < num_sends; i++)
{
HYPRE_Int j_begin, j_end;
hypre_GetSimpleThreadPartition(&j_begin, &j_end, send_map_starts[i + 1] - send_map_starts[i]);
j_begin += send_map_starts[i];
j_end += send_map_starts[i];
HYPRE_Int count = counts[i] + jdata_send_map_starts[i];
if (data)
{
if (skip_same_sign && skip_fine)
{
for (j = j_begin; j < j_end; j++)
{
HYPRE_Int jrow = send_map_elmts[j];
/*HYPRE_Int count_begin = count;*/
if (diag_data[diag_i[jrow]] >= 0)
{
for (k = diag_i[jrow] + 1; k < diag_i[jrow + 1]; k++)
{
if (diag_data[k] < 0 && CF_marker[diag_j[k]] >= 0)
{
B_int_j[count] = (HYPRE_BigInt)diag_j[k]+first_col_diag;
B_int_data[count] = diag_data[k];
count++;
}
}
for (k = offd_i[jrow]; k < offd_i[jrow + 1]; k++)
{
HYPRE_Int c = offd_j[k];
HYPRE_BigInt c_global = col_map_offd[c];
if (offd_data[k] < 0)
{
B_int_j[count] = c_global;
B_int_data[count] = offd_data[k];
count++;
}
}
}
else
{
for (k = diag_i[jrow] + 1; k < diag_i[jrow + 1]; k++)
{
if (diag_data[k] > 0 && CF_marker[diag_j[k]] >= 0)
{
B_int_j[count] = (HYPRE_BigInt)diag_j[k]+first_col_diag;
B_int_data[count] = diag_data[k];
count++;
}
}
for (k = offd_i[jrow]; k < offd_i[jrow + 1]; k++)
{
HYPRE_Int c = offd_j[k];
HYPRE_BigInt c_global = col_map_offd[c];
if (offd_data[k] > 0)
{
B_int_j[count] = c_global;
B_int_data[count] = offd_data[k];
count++;
}
}
}
}
}
else
{
for (j = j_begin; j < j_end; ++j) {
HYPRE_Int jrow = send_map_elmts[j];
for (k = diag_i[jrow]; k < diag_i[jrow+1]; k++)
{
B_int_j[count] = (HYPRE_BigInt)diag_j[k]+first_col_diag;
B_int_data[count] = diag_data[k];
count++;
}
for (k = offd_i[jrow]; k < offd_i[jrow+1]; k++)
{
B_int_j[count] = col_map_offd[offd_j[k]];
B_int_data[count] = offd_data[k];
count++;
}
}
}
} // data
else
{
if (skip_fine)
{
for (j = j_begin; j < j_end; j++)
{
HYPRE_Int jrow = send_map_elmts[j];
for (k = diag_i[jrow]; k < diag_i[jrow + 1]; k++)
{
if (CF_marker[diag_j[k]] >= 0)
{
B_int_j[count] = (HYPRE_BigInt)diag_j[k] + first_col_diag;
count++;
}
}
for (k = offd_i[jrow]; k < offd_i[jrow + 1]; k++)
{
if (CF_marker_offd[offd_j[k]] >= 0)
{
B_int_j[count] = col_map_offd[offd_j[k]];
count++;
}
}
}
}
else
{
for (j = j_begin; j < j_end; ++j) {
HYPRE_Int jrow = send_map_elmts[j];
for (k = diag_i[jrow]; k < diag_i[jrow+1]; k++)
{
B_int_j[count] = (HYPRE_BigInt)diag_j[k]+first_col_diag;
count++;
}
for (k = offd_i[jrow]; k < offd_i[jrow+1]; k++)
{
B_int_j[count] = col_map_offd[offd_j[k]];
count++;
}
}
}
} // !data
} /* for each send target */
hypre_TFree(counts, HYPRE_MEMORY_HOST);
} /* omp parallel. JSP: this takes most of time in this function */
hypre_TFree(prefix_sum_workspace, HYPRE_MEMORY_HOST);
tmp_comm_pkg = hypre_CTAlloc(hypre_ParCSRCommPkg, 1, HYPRE_MEMORY_HOST);
hypre_ParCSRCommPkgComm(tmp_comm_pkg) = comm;
hypre_ParCSRCommPkgNumSends(tmp_comm_pkg) = num_sends;
hypre_ParCSRCommPkgNumRecvs(tmp_comm_pkg) = num_recvs;
hypre_ParCSRCommPkgSendProcs(tmp_comm_pkg) =
hypre_ParCSRCommPkgSendProcs(comm_pkg);
hypre_ParCSRCommPkgRecvProcs(tmp_comm_pkg) =
hypre_ParCSRCommPkgRecvProcs(comm_pkg);
hypre_ParCSRCommPkgSendMapStarts(tmp_comm_pkg) = jdata_send_map_starts;
hypre_ParCSRCommHandleDestroy(comm_handle);
comm_handle = NULL;
/*--------------------------------------------------------------------------
 * After the communication exchange, B_ext_i[j+1] contains the number of
 * elements in row j; accumulate B_ext_i into row pointers and compute
 * *num_nonzeros for B_ext.
 *--------------------------------------------------------------------------*/
for (i = 0; i < num_recvs; i++)
{
for (j = recv_vec_starts[i]; j < recv_vec_starts[i+1]; j++)
{
B_ext_i[j+1] += B_ext_i[j];
}
}
*num_nonzeros = B_ext_i[num_rows_B_ext];
*pB_ext_j = hypre_TAlloc(HYPRE_BigInt, *num_nonzeros, HYPRE_MEMORY_HOST);
B_ext_j = *pB_ext_j;
if (data)
{
*pB_ext_data = hypre_TAlloc(HYPRE_Complex, *num_nonzeros, HYPRE_MEMORY_HOST);
B_ext_data = *pB_ext_data;
}
for (i = 0; i < num_recvs; i++)
{
start_index = B_ext_i[recv_vec_starts[i]];
*num_nonzeros = B_ext_i[recv_vec_starts[i+1]]-start_index;
jdata_recv_vec_starts[i+1] = B_ext_i[recv_vec_starts[i+1]];
}
hypre_ParCSRCommPkgRecvVecStarts(tmp_comm_pkg) = jdata_recv_vec_starts;
*comm_handle_idx = hypre_ParCSRCommHandleCreate(21,tmp_comm_pkg,B_int_j,B_ext_j);
if (data)
{
*comm_handle_data = hypre_ParCSRCommHandleCreate(1,tmp_comm_pkg,B_int_data,
B_ext_data);
}
if (row_map_comm_handle)
{
hypre_ParCSRCommHandleDestroy(row_map_comm_handle);
row_map_comm_handle = NULL;
}
hypre_TFree(jdata_send_map_starts, HYPRE_MEMORY_HOST);
hypre_TFree(jdata_recv_vec_starts, HYPRE_MEMORY_HOST);
hypre_TFree(tmp_comm_pkg, HYPRE_MEMORY_HOST);
hypre_TFree(B_int_i, HYPRE_MEMORY_HOST);
if ( find_row_map ) hypre_TFree(B_int_row_map, HYPRE_MEMORY_HOST);
/* end generic part */
}
void hypre_ParCSRMatrixExtractBExt_Arrays(
HYPRE_Int ** pB_ext_i,
HYPRE_BigInt ** pB_ext_j,
HYPRE_Complex ** pB_ext_data,
HYPRE_BigInt ** pB_ext_row_map,
HYPRE_Int * num_nonzeros,
HYPRE_Int data,
HYPRE_Int find_row_map,
MPI_Comm comm,
hypre_ParCSRCommPkg * comm_pkg,
HYPRE_Int num_cols_B,
HYPRE_Int num_recvs,
HYPRE_Int num_sends,
HYPRE_BigInt first_col_diag,
HYPRE_BigInt * row_starts,
HYPRE_Int * recv_vec_starts,
HYPRE_Int * send_map_starts,
HYPRE_Int * send_map_elmts,
HYPRE_Int * diag_i,
HYPRE_Int * diag_j,
HYPRE_Int * offd_i,
HYPRE_Int * offd_j,
HYPRE_BigInt * col_map_offd,
HYPRE_Real * diag_data,
HYPRE_Real * offd_data
)
{
hypre_ParCSRCommHandle *comm_handle_idx, *comm_handle_data;
hypre_ParCSRMatrixExtractBExt_Arrays_Overlap(
pB_ext_i, pB_ext_j, pB_ext_data, pB_ext_row_map, num_nonzeros,
data, find_row_map, comm, comm_pkg, num_cols_B, num_recvs, num_sends,
first_col_diag, row_starts, recv_vec_starts, send_map_starts, send_map_elmts,
diag_i, diag_j, offd_i, offd_j, col_map_offd, diag_data, offd_data,
&comm_handle_idx, &comm_handle_data,
NULL, NULL,
0, 0);
HYPRE_Int *send_idx = (HYPRE_Int *)comm_handle_idx->send_data;
hypre_ParCSRCommHandleDestroy(comm_handle_idx);
hypre_TFree(send_idx, HYPRE_MEMORY_HOST);
if (data)
{
HYPRE_Real *send_data = (HYPRE_Real *)comm_handle_data->send_data;
hypre_ParCSRCommHandleDestroy(comm_handle_data);
hypre_TFree(send_data, HYPRE_MEMORY_HOST);
}
}
/*--------------------------------------------------------------------------
 * hypre_ParCSRMatrixExtractBExt : extracts rows from B which are located on
 * other processors and are needed locally for multiplication with A. The
 * rows are returned as a CSRMatrix.
 *--------------------------------------------------------------------------*/
hypre_CSRMatrix *
hypre_ParCSRMatrixExtractBExt_Overlap( hypre_ParCSRMatrix *B,
hypre_ParCSRMatrix *A,
HYPRE_Int data,
hypre_ParCSRCommHandle **comm_handle_idx,
hypre_ParCSRCommHandle **comm_handle_data,
HYPRE_Int *CF_marker, HYPRE_Int *CF_marker_offd,
HYPRE_Int skip_fine, HYPRE_Int skip_same_sign )
{
MPI_Comm comm = hypre_ParCSRMatrixComm(B);
HYPRE_BigInt first_col_diag = hypre_ParCSRMatrixFirstColDiag(B);
/*HYPRE_Int first_row_index = hypre_ParCSRMatrixFirstRowIndex(B);*/
HYPRE_BigInt *col_map_offd = hypre_ParCSRMatrixColMapOffd(B);
hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
HYPRE_Int num_recvs;
HYPRE_Int *recv_vec_starts;
HYPRE_Int num_sends;
HYPRE_Int *send_map_starts;
HYPRE_Int *send_map_elmts;
hypre_CSRMatrix *diag = hypre_ParCSRMatrixDiag(B);
HYPRE_Int *diag_i = hypre_CSRMatrixI(diag);
HYPRE_Int *diag_j = hypre_CSRMatrixJ(diag);
HYPRE_Real *diag_data = hypre_CSRMatrixData(diag);
hypre_CSRMatrix *offd = hypre_ParCSRMatrixOffd(B);
HYPRE_Int *offd_i = hypre_CSRMatrixI(offd);
HYPRE_Int *offd_j = hypre_CSRMatrixJ(offd);
HYPRE_Real *offd_data = hypre_CSRMatrixData(offd);
HYPRE_Int num_cols_B, num_nonzeros;
HYPRE_Int num_rows_B_ext;
hypre_CSRMatrix *B_ext;
HYPRE_Int *B_ext_i;
HYPRE_BigInt *B_ext_j;
HYPRE_Complex *B_ext_data;
HYPRE_BigInt *idummy;
/*---------------------------------------------------------------------
* If there exists no CommPkg for A, a CommPkg is generated using
* equally load balanced partitionings
*--------------------------------------------------------------------*/
if (!hypre_ParCSRMatrixCommPkg(A))
{
hypre_MatvecCommPkgCreate(A);
}
comm_pkg = hypre_ParCSRMatrixCommPkg(A);
num_recvs = hypre_ParCSRCommPkgNumRecvs(comm_pkg);
recv_vec_starts = hypre_ParCSRCommPkgRecvVecStarts(comm_pkg);
num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
send_map_starts = hypre_ParCSRCommPkgSendMapStarts(comm_pkg);
send_map_elmts = hypre_ParCSRCommPkgSendMapElmts(comm_pkg);
num_cols_B = hypre_ParCSRMatrixGlobalNumCols(B);
num_rows_B_ext = recv_vec_starts[num_recvs];
hypre_ParCSRMatrixExtractBExt_Arrays_Overlap
( &B_ext_i, &B_ext_j, &B_ext_data, &idummy,
&num_nonzeros,
data, 0, comm, comm_pkg,
num_cols_B, num_recvs, num_sends,
first_col_diag, B->row_starts,
recv_vec_starts, send_map_starts, send_map_elmts,
diag_i, diag_j, offd_i, offd_j, col_map_offd,
diag_data, offd_data,
comm_handle_idx, comm_handle_data,
CF_marker, CF_marker_offd,
skip_fine, skip_same_sign
);
B_ext = hypre_CSRMatrixCreate(num_rows_B_ext,num_cols_B,num_nonzeros);
hypre_CSRMatrixMemoryLocation(B_ext) = HYPRE_MEMORY_HOST;
hypre_CSRMatrixI(B_ext) = B_ext_i;
hypre_CSRMatrixBigJ(B_ext) = B_ext_j;
if (data) hypre_CSRMatrixData(B_ext) = B_ext_data;
return B_ext;
}
hypre_CSRMatrix *
hypre_ParCSRMatrixExtractBExt( hypre_ParCSRMatrix *B,
hypre_ParCSRMatrix *A,
HYPRE_Int want_data )
{
#if 0
hypre_ParCSRCommHandle *comm_handle_idx, *comm_handle_data;
hypre_CSRMatrix *B_ext = hypre_ParCSRMatrixExtractBExt_Overlap(B, A, want_data, &comm_handle_idx, &comm_handle_data, NULL, NULL, 0, 0);
HYPRE_Int *send_idx = (HYPRE_Int *)comm_handle_idx->send_data;
hypre_ParCSRCommHandleDestroy(comm_handle_idx);
hypre_TFree(send_idx, HYPRE_MEMORY_HOST);
if (want_data)
{
HYPRE_Real *send_data = (HYPRE_Real *)comm_handle_data->send_data;
hypre_ParCSRCommHandleDestroy(comm_handle_data);
hypre_TFree(send_data, HYPRE_MEMORY_HOST);
}
#else
hypre_assert( hypre_CSRMatrixMemoryLocation(hypre_ParCSRMatrixDiag(B)) ==
hypre_CSRMatrixMemoryLocation(hypre_ParCSRMatrixOffd(B)) );
hypre_CSRMatrix *B_ext;
void *request;
if (!hypre_ParCSRMatrixCommPkg(A))
{
hypre_MatvecCommPkgCreate(A);
}
hypre_ParcsrGetExternalRowsInit(B,
hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(A)),
hypre_ParCSRMatrixColMapOffd(A),
hypre_ParCSRMatrixCommPkg(A),
want_data,
&request);
B_ext = hypre_ParcsrGetExternalRowsWait(request);
#endif
return B_ext;
}
/*--------------------------------------------------------------------------
* hypre_ParCSRMatrixTranspose
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_ParCSRMatrixTranspose( hypre_ParCSRMatrix *A,
hypre_ParCSRMatrix **AT_ptr,
HYPRE_Int data )
{
hypre_ParCSRCommHandle *comm_handle;
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
HYPRE_Int num_cols = hypre_ParCSRMatrixNumCols(A);
HYPRE_BigInt first_row_index = hypre_ParCSRMatrixFirstRowIndex(A);
HYPRE_BigInt *row_starts = hypre_ParCSRMatrixRowStarts(A);
HYPRE_BigInt *col_starts = hypre_ParCSRMatrixColStarts(A);
HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(A_offd);
HYPRE_Int ierr = 0;
HYPRE_Int num_sends, num_recvs, num_cols_offd_AT;
HYPRE_Int i, j, k, index, counter, j_row;
HYPRE_BigInt value;
hypre_ParCSRMatrix *AT;
hypre_CSRMatrix *AT_diag;
hypre_CSRMatrix *AT_offd;
hypre_CSRMatrix *AT_tmp;
HYPRE_BigInt first_row_index_AT, first_col_diag_AT;
HYPRE_Int local_num_rows_AT, local_num_cols_AT;
HYPRE_Int *AT_tmp_i;
HYPRE_Int *AT_tmp_j;
HYPRE_BigInt *AT_big_j = NULL;
HYPRE_Complex *AT_tmp_data;
HYPRE_Int *AT_buf_i;
HYPRE_BigInt *AT_buf_j;
HYPRE_Complex *AT_buf_data;
HYPRE_Int *AT_offd_i;
HYPRE_Int *AT_offd_j;
HYPRE_Complex *AT_offd_data;
HYPRE_BigInt *col_map_offd_AT;
HYPRE_BigInt *row_starts_AT;
HYPRE_BigInt *col_starts_AT;
HYPRE_Int num_procs, my_id;
HYPRE_Int *recv_procs;
HYPRE_Int *send_procs;
HYPRE_Int *recv_vec_starts;
HYPRE_Int *send_map_starts;
HYPRE_Int *send_map_elmts;
HYPRE_Int *tmp_recv_vec_starts;
HYPRE_Int *tmp_send_map_starts;
hypre_ParCSRCommPkg *tmp_comm_pkg;
hypre_MPI_Comm_size(comm,&num_procs);
hypre_MPI_Comm_rank(comm,&my_id);
num_cols_offd_AT = 0;
counter = 0;
AT_offd_j = NULL;
AT_offd_data = NULL;
col_map_offd_AT = NULL;
HYPRE_MemoryLocation memory_location = hypre_ParCSRMatrixMemoryLocation(A);
/*---------------------------------------------------------------------
* If there exists no CommPkg for A, a CommPkg is generated using
* equally load balanced partitionings
*--------------------------------------------------------------------*/
if (!comm_pkg)
{
hypre_MatvecCommPkgCreate(A);
comm_pkg = hypre_ParCSRMatrixCommPkg(A);
}
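   /* Overall strategy: transpose A_diag locally to get AT_diag; transpose
      A_offd locally as well, then ship those entries back to the ranks that
      own the corresponding rows of A^T using A's communication pattern in
      reverse, and assemble the received entries into AT_offd. */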
if (num_procs > 1)
{
hypre_CSRMatrixTranspose (A_offd, &AT_tmp, data);
AT_tmp_i = hypre_CSRMatrixI(AT_tmp);
AT_tmp_j = hypre_CSRMatrixJ(AT_tmp);
if (data)
{
AT_tmp_data = hypre_CSRMatrixData(AT_tmp);
}
num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
num_recvs = hypre_ParCSRCommPkgNumRecvs(comm_pkg);
recv_procs = hypre_ParCSRCommPkgRecvProcs(comm_pkg);
send_procs = hypre_ParCSRCommPkgSendProcs(comm_pkg);
recv_vec_starts = hypre_ParCSRCommPkgRecvVecStarts(comm_pkg);
send_map_starts = hypre_ParCSRCommPkgSendMapStarts(comm_pkg);
send_map_elmts = hypre_ParCSRCommPkgSendMapElmts(comm_pkg);
AT_buf_i = hypre_CTAlloc(HYPRE_Int, send_map_starts[num_sends], HYPRE_MEMORY_HOST);
if (AT_tmp_i[num_cols_offd])
{
AT_big_j = hypre_CTAlloc(HYPRE_BigInt, AT_tmp_i[num_cols_offd], HYPRE_MEMORY_HOST);
}
for (i = 0; i < AT_tmp_i[num_cols_offd]; i++)
{
//AT_tmp_j[i] += first_row_index;
AT_big_j[i] = (HYPRE_BigInt)AT_tmp_j[i]+first_row_index;
}
for (i = 0; i < num_cols_offd; i++)
{
AT_tmp_i[i] = AT_tmp_i[i+1]-AT_tmp_i[i];
}
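      /* AT_tmp_i is converted in place from row pointers to row lengths so
         that the reverse exchange below can deliver per-row counts to the
         ranks that will own the transposed rows. */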
comm_handle = hypre_ParCSRCommHandleCreate(12, comm_pkg, AT_tmp_i, AT_buf_i);
}
hypre_CSRMatrixTranspose(A_diag, &AT_diag, data);
AT_offd_i = hypre_CTAlloc(HYPRE_Int, num_cols+1, memory_location);
if (num_procs > 1)
{
hypre_ParCSRCommHandleDestroy(comm_handle);
comm_handle = NULL;
tmp_send_map_starts = hypre_CTAlloc(HYPRE_Int, num_sends+1, HYPRE_MEMORY_HOST);
tmp_recv_vec_starts = hypre_CTAlloc(HYPRE_Int, num_recvs+1, HYPRE_MEMORY_HOST);
tmp_send_map_starts[0] = send_map_starts[0];
for (i = 0; i < num_sends; i++)
{
tmp_send_map_starts[i+1] = tmp_send_map_starts[i];
for (j = send_map_starts[i]; j < send_map_starts[i+1]; j++)
{
tmp_send_map_starts[i+1] += AT_buf_i[j];
AT_offd_i[send_map_elmts[j]+1] += AT_buf_i[j];
}
}
for (i = 0; i < num_cols; i++)
{
AT_offd_i[i+1] += AT_offd_i[i];
}
tmp_recv_vec_starts[0] = recv_vec_starts[0];
for (i = 0; i < num_recvs; i++)
{
tmp_recv_vec_starts[i+1] = tmp_recv_vec_starts[i];
for (j = recv_vec_starts[i]; j < recv_vec_starts[i+1]; j++)
{
tmp_recv_vec_starts[i+1] += AT_tmp_i[j];
}
}
tmp_comm_pkg = hypre_CTAlloc(hypre_ParCSRCommPkg, 1, HYPRE_MEMORY_HOST);
hypre_ParCSRCommPkgComm(tmp_comm_pkg) = comm;
hypre_ParCSRCommPkgNumSends(tmp_comm_pkg) = num_sends;
hypre_ParCSRCommPkgNumRecvs(tmp_comm_pkg) = num_recvs;
hypre_ParCSRCommPkgRecvProcs(tmp_comm_pkg) = recv_procs;
hypre_ParCSRCommPkgSendProcs(tmp_comm_pkg) = send_procs;
hypre_ParCSRCommPkgRecvVecStarts(tmp_comm_pkg) = tmp_recv_vec_starts;
hypre_ParCSRCommPkgSendMapStarts(tmp_comm_pkg) = tmp_send_map_starts;
AT_buf_j = hypre_CTAlloc(HYPRE_BigInt, tmp_send_map_starts[num_sends], HYPRE_MEMORY_HOST);
comm_handle = hypre_ParCSRCommHandleCreate(22, tmp_comm_pkg, AT_big_j,
AT_buf_j);
hypre_ParCSRCommHandleDestroy(comm_handle);
comm_handle = NULL;
hypre_TFree(AT_big_j, HYPRE_MEMORY_HOST);
if (data)
{
AT_buf_data = hypre_CTAlloc(HYPRE_Complex, tmp_send_map_starts[num_sends], HYPRE_MEMORY_HOST);
comm_handle = hypre_ParCSRCommHandleCreate(2,tmp_comm_pkg,AT_tmp_data,
AT_buf_data);
hypre_ParCSRCommHandleDestroy(comm_handle);
comm_handle = NULL;
}
hypre_TFree(tmp_recv_vec_starts, HYPRE_MEMORY_HOST);
hypre_TFree(tmp_send_map_starts, HYPRE_MEMORY_HOST);
hypre_TFree(tmp_comm_pkg, HYPRE_MEMORY_HOST);
hypre_CSRMatrixDestroy(AT_tmp);
if (AT_offd_i[num_cols])
{
AT_offd_j = hypre_CTAlloc(HYPRE_Int, AT_offd_i[num_cols], memory_location);
AT_big_j = hypre_CTAlloc(HYPRE_BigInt, AT_offd_i[num_cols], HYPRE_MEMORY_HOST);
if (data)
{
AT_offd_data = hypre_CTAlloc(HYPRE_Complex, AT_offd_i[num_cols], memory_location);
}
}
else
{
AT_offd_j = NULL;
AT_offd_data = NULL;
}
counter = 0;
for (i = 0; i < num_sends; i++)
{
for (j = send_map_starts[i]; j < send_map_starts[i+1]; j++)
{
j_row = send_map_elmts[j];
index = AT_offd_i[j_row];
for (k = 0; k < AT_buf_i[j]; k++)
{
if (data)
{
AT_offd_data[index] = AT_buf_data[counter];
}
AT_big_j[index++] = AT_buf_j[counter++];
}
AT_offd_i[j_row] = index;
}
}
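/* the fill above left AT_offd_i[i] pointing at the end of row i;
shift the entries by one to restore the row starts */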
for (i = num_cols; i > 0; i--)
{
AT_offd_i[i] = AT_offd_i[i-1];
}
AT_offd_i[0] = 0;
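/* sort the received global column indices and compress out duplicates
to build the off-diagonal column map of the transpose */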
if (counter)
{
hypre_BigQsort0(AT_buf_j,0,counter-1);
num_cols_offd_AT = 1;
value = AT_buf_j[0];
for (i = 1; i < counter; i++)
{
if (value < AT_buf_j[i])
{
AT_buf_j[num_cols_offd_AT++] = AT_buf_j[i];
value = AT_buf_j[i];
}
}
}
if (num_cols_offd_AT)
{
col_map_offd_AT = hypre_CTAlloc(HYPRE_BigInt, num_cols_offd_AT, HYPRE_MEMORY_HOST);
}
else
{
col_map_offd_AT = NULL;
}
for (i = 0; i < num_cols_offd_AT; i++)
{
col_map_offd_AT[i] = AT_buf_j[i];
}
hypre_TFree(AT_buf_i, HYPRE_MEMORY_HOST);
hypre_TFree(AT_buf_j, HYPRE_MEMORY_HOST);
if (data)
{
hypre_TFree(AT_buf_data, HYPRE_MEMORY_HOST);
}
for (i = 0; i < counter; i++)
{
AT_offd_j[i] = hypre_BigBinarySearch(col_map_offd_AT,AT_big_j[i],
num_cols_offd_AT);
}
hypre_TFree(AT_big_j, HYPRE_MEMORY_HOST);
}
AT_offd = hypre_CSRMatrixCreate(num_cols, num_cols_offd_AT, counter);
hypre_CSRMatrixMemoryLocation(AT_offd) = memory_location;
hypre_CSRMatrixI(AT_offd) = AT_offd_i;
hypre_CSRMatrixJ(AT_offd) = AT_offd_j;
hypre_CSRMatrixData(AT_offd) = AT_offd_data;
row_starts_AT = hypre_CTAlloc(HYPRE_BigInt, 2, HYPRE_MEMORY_HOST);
for (i = 0; i < 2; i++)
{
row_starts_AT[i] = col_starts[i];
}
if (row_starts != col_starts)
{
col_starts_AT = hypre_CTAlloc(HYPRE_BigInt, 2, HYPRE_MEMORY_HOST);
for (i = 0; i < 2; i++)
{
col_starts_AT[i] = row_starts[i];
}
}
else
{
col_starts_AT = row_starts_AT;
}
first_row_index_AT = row_starts_AT[0];
first_col_diag_AT = col_starts_AT[0];
local_num_rows_AT = (HYPRE_Int)(row_starts_AT[1]-first_row_index_AT );
local_num_cols_AT = (HYPRE_Int)(col_starts_AT[1]-first_col_diag_AT);
AT = hypre_CTAlloc(hypre_ParCSRMatrix, 1, HYPRE_MEMORY_HOST);
hypre_ParCSRMatrixComm(AT) = comm;
hypre_ParCSRMatrixDiag(AT) = AT_diag;
hypre_ParCSRMatrixOffd(AT) = AT_offd;
hypre_ParCSRMatrixGlobalNumRows(AT) = hypre_ParCSRMatrixGlobalNumCols(A);
hypre_ParCSRMatrixGlobalNumCols(AT) = hypre_ParCSRMatrixGlobalNumRows(A);
hypre_ParCSRMatrixRowStarts(AT) = row_starts_AT;
hypre_ParCSRMatrixColStarts(AT) = col_starts_AT;
hypre_ParCSRMatrixColMapOffd(AT) = col_map_offd_AT;
hypre_ParCSRMatrixFirstRowIndex(AT) = first_row_index_AT;
hypre_ParCSRMatrixFirstColDiag(AT) = first_col_diag_AT;
hypre_ParCSRMatrixLastRowIndex(AT) = first_row_index_AT + local_num_rows_AT - 1;
hypre_ParCSRMatrixLastColDiag(AT) = first_col_diag_AT + local_num_cols_AT - 1;
hypre_ParCSRMatrixOwnsData(AT) = 1;
hypre_ParCSRMatrixOwnsRowStarts(AT) = 1;
hypre_ParCSRMatrixOwnsColStarts(AT) = 1;
if (row_starts_AT == col_starts_AT)
{
hypre_ParCSRMatrixOwnsColStarts(AT) = 0;
}
hypre_ParCSRMatrixCommPkg(AT) = NULL;
hypre_ParCSRMatrixCommPkgT(AT) = NULL;
hypre_ParCSRMatrixRowindices(AT) = NULL;
hypre_ParCSRMatrixRowvalues(AT) = NULL;
hypre_ParCSRMatrixGetrowactive(AT) = 0;
hypre_ParCSRMatrixOwnsAssumedPartition(AT) = 1;
*AT_ptr = AT;
return ierr;
}
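/* Illustrative usage sketch, not part of hypre: transposing a matrix with
the routine above and releasing the result. The helper name is ours; the
transpose routine is assumed to have the standard hypre signature
(matrix, result pointer, data flag). */
static void example_transpose_usage( hypre_ParCSRMatrix *A )
{
hypre_ParCSRMatrix *AT = NULL;
/* data = 1 transposes the numerical values as well as the pattern */
hypre_ParCSRMatrixTranspose(A, &AT, 1);
/* ... use AT ... */
hypre_ParCSRMatrixDestroy(AT);
}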
/* -----------------------------------------------------------------------------
* generate a parallel spanning tree (for Maxwell Equation)
* G_csr is the node to edge connectivity matrix
* ----------------------------------------------------------------------------- */
void
hypre_ParCSRMatrixGenSpanningTree( hypre_ParCSRMatrix *G_csr,
HYPRE_Int **indices,
HYPRE_Int G_type )
{
HYPRE_BigInt nrows_G, ncols_G;
HYPRE_Int *G_diag_i, *G_diag_j, *GT_diag_mat, i, j, k, edge;
HYPRE_Int *nodes_marked, *edges_marked, *queue, queue_tail, queue_head, node;
HYPRE_Int mypid, nprocs, n_children, *children, nsends, *send_procs, *recv_cnts;
HYPRE_Int nrecvs, *recv_procs, n_proc_array, *proc_array, *pgraph_i, *pgraph_j;
HYPRE_Int parent, proc, proc2, node2, found, *t_indices, tree_size, *T_diag_i;
HYPRE_Int *T_diag_j, *counts, offset;
MPI_Comm comm;
hypre_ParCSRCommPkg *comm_pkg;
hypre_CSRMatrix *G_diag;
/* fetch G matrix (G_type = 0 ==> node to edge) */
if (G_type == 0)
{
nrows_G = hypre_ParCSRMatrixGlobalNumRows(G_csr);
ncols_G = hypre_ParCSRMatrixGlobalNumCols(G_csr);
G_diag = hypre_ParCSRMatrixDiag(G_csr);
G_diag_i = hypre_CSRMatrixI(G_diag);
G_diag_j = hypre_CSRMatrixJ(G_diag);
}
else
{
nrows_G = hypre_ParCSRMatrixGlobalNumCols(G_csr);
ncols_G = hypre_ParCSRMatrixGlobalNumRows(G_csr);
G_diag = hypre_ParCSRMatrixDiag(G_csr);
T_diag_i = hypre_CSRMatrixI(G_diag);
T_diag_j = hypre_CSRMatrixJ(G_diag);
counts = hypre_TAlloc(HYPRE_Int, nrows_G , HYPRE_MEMORY_HOST);
for (i = 0; i < nrows_G; i++) counts[i] = 0;
for (i = 0; i < T_diag_i[ncols_G]; i++) counts[T_diag_j[i]]++;
G_diag_i = hypre_TAlloc(HYPRE_Int, (nrows_G+1) , HYPRE_MEMORY_HOST);
G_diag_j = hypre_TAlloc(HYPRE_Int, T_diag_i[ncols_G] , HYPRE_MEMORY_HOST);
G_diag_i[0] = 0;
for (i = 1; i <= nrows_G; i++) G_diag_i[i] = G_diag_i[i-1] + counts[i-1];
for (i = 0; i < ncols_G; i++)
{
for (j = T_diag_i[i]; j < T_diag_i[i+1]; j++)
{
k = T_diag_j[j];
offset = G_diag_i[k]++;
G_diag_j[offset] = i;
}
}
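/* the scatter above advanced G_diag_i by one row; rebuild the row pointers */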
G_diag_i[0] = 0;
for (i = 1; i <= nrows_G; i++)
{
G_diag_i[i] = G_diag_i[i-1] + counts[i-1];
}
hypre_TFree(counts, HYPRE_MEMORY_HOST);
}
/* form G transpose in special form (2 nodes per edge max) */
GT_diag_mat = hypre_TAlloc(HYPRE_Int, 2 * ncols_G , HYPRE_MEMORY_HOST);
for (i = 0; i < 2 * ncols_G; i++) GT_diag_mat[i] = -1;
for (i = 0; i < nrows_G; i++)
{
for (j = G_diag_i[i]; j < G_diag_i[i+1]; j++)
{
edge = G_diag_j[j];
if (GT_diag_mat[edge*2] == -1) GT_diag_mat[edge*2] = i;
else GT_diag_mat[edge*2+1] = i;
}
}
/* BFS on the local matrix graph to find tree */
nodes_marked = hypre_TAlloc(HYPRE_Int, nrows_G , HYPRE_MEMORY_HOST);
edges_marked = hypre_TAlloc(HYPRE_Int, ncols_G , HYPRE_MEMORY_HOST);
for (i = 0; i < nrows_G; i++) nodes_marked[i] = 0;
for (i = 0; i < ncols_G; i++) edges_marked[i] = 0;
queue = hypre_TAlloc(HYPRE_Int, nrows_G , HYPRE_MEMORY_HOST);
queue_head = 0;
queue_tail = 1;
queue[0] = 0;
nodes_marked[0] = 1;
while ((queue_tail-queue_head) > 0)
{
node = queue[queue_tail-1];
queue_tail--;
for (i = G_diag_i[node]; i < G_diag_i[node+1]; i++)
{
edge = G_diag_j[i];
if (edges_marked[edge] == 0)
{
if (GT_diag_mat[2*edge+1] != -1)
{
node2 = GT_diag_mat[2*edge];
if (node2 == node) node2 = GT_diag_mat[2*edge+1];
if (nodes_marked[node2] == 0)
{
nodes_marked[node2] = 1;
edges_marked[edge] = 1;
queue[queue_tail] = node2;
queue_tail++;
}
}
}
}
}
hypre_TFree(nodes_marked, HYPRE_MEMORY_HOST);
hypre_TFree(queue, HYPRE_MEMORY_HOST);
hypre_TFree(GT_diag_mat, HYPRE_MEMORY_HOST);
/* fetch the communication information from G_csr */
comm = hypre_ParCSRMatrixComm(G_csr);
hypre_MPI_Comm_rank(comm, &mypid);
hypre_MPI_Comm_size(comm, &nprocs);
comm_pkg = hypre_ParCSRMatrixCommPkg(G_csr);
if (comm_pkg == NULL)
{
hypre_MatvecCommPkgCreate((hypre_ParCSRMatrix *) G_csr);
comm_pkg = hypre_ParCSRMatrixCommPkg(G_csr);
}
/* construct processor graph based on node-edge connection */
/* (local edges connected to neighbor processor nodes) */
n_children = 0;
nrecvs = nsends = 0;
if (nprocs > 1)
{
nsends = hypre_ParCSRCommPkgNumSends(comm_pkg);
send_procs = hypre_ParCSRCommPkgSendProcs(comm_pkg);
nrecvs = hypre_ParCSRCommPkgNumRecvs(comm_pkg);
recv_procs = hypre_ParCSRCommPkgRecvProcs(comm_pkg);
proc_array = NULL;
if ((nsends+nrecvs) > 0)
{
n_proc_array = 0;
proc_array = hypre_TAlloc(HYPRE_Int, (nsends+nrecvs) , HYPRE_MEMORY_HOST);
for (i = 0; i < nsends; i++) proc_array[i] = send_procs[i];
for (i = 0; i < nrecvs; i++) proc_array[nsends+i] = recv_procs[i];
hypre_qsort0(proc_array, 0, nsends+nrecvs-1);
n_proc_array = 1;
for (i = 1; i < nrecvs+nsends; i++)
if (proc_array[i] != proc_array[n_proc_array-1])
proc_array[n_proc_array++] = proc_array[i];
}
pgraph_i = hypre_TAlloc(HYPRE_Int, (nprocs+1) , HYPRE_MEMORY_HOST);
recv_cnts = hypre_TAlloc(HYPRE_Int, nprocs , HYPRE_MEMORY_HOST);
hypre_MPI_Allgather(&n_proc_array, 1, HYPRE_MPI_INT, recv_cnts, 1,
HYPRE_MPI_INT, comm);
pgraph_i[0] = 0;
for (i = 1; i <= nprocs; i++)
pgraph_i[i] = pgraph_i[i-1] + recv_cnts[i-1];
pgraph_j = hypre_TAlloc(HYPRE_Int, pgraph_i[nprocs] , HYPRE_MEMORY_HOST);
hypre_MPI_Allgatherv(proc_array, n_proc_array, HYPRE_MPI_INT, pgraph_j,
recv_cnts, pgraph_i, HYPRE_MPI_INT, comm);
hypre_TFree(recv_cnts, HYPRE_MEMORY_HOST);
/* BFS on the processor graph to determine parent and children */
nodes_marked = hypre_TAlloc(HYPRE_Int, nprocs , HYPRE_MEMORY_HOST);
for (i = 0; i < nprocs; i++) nodes_marked[i] = -1;
queue = hypre_TAlloc(HYPRE_Int, nprocs , HYPRE_MEMORY_HOST);
queue_head = 0;
queue_tail = 1;
node = 0;
queue[0] = node;
while ((queue_tail-queue_head) > 0)
{
proc = queue[queue_tail-1];
queue_tail--;
for (i = pgraph_i[proc]; i < pgraph_i[proc+1]; i++)
{
proc2 = pgraph_j[i];
if (nodes_marked[proc2] < 0)
{
nodes_marked[proc2] = proc;
queue[queue_tail] = proc2;
queue_tail++;
}
}
}
parent = nodes_marked[mypid];
n_children = 0;
for (i = 0; i < nprocs; i++) if (nodes_marked[i] == mypid) n_children++;
if (n_children == 0) { children = NULL; }
else
{
children = hypre_TAlloc(HYPRE_Int, n_children , HYPRE_MEMORY_HOST);
n_children = 0;
for (i = 0; i < nprocs; i++)
if (nodes_marked[i] == mypid) children[n_children++] = i;
}
hypre_TFree(nodes_marked, HYPRE_MEMORY_HOST);
hypre_TFree(queue, HYPRE_MEMORY_HOST);
hypre_TFree(pgraph_i, HYPRE_MEMORY_HOST);
hypre_TFree(pgraph_j, HYPRE_MEMORY_HOST);
}
/* first, handle the connection with my parent: if an edge on my *
* parent's side is incident to one of my nodes, my parent marks it */
found = 0;
for (i = 0; i < nrecvs; i++)
{
proc = hypre_ParCSRCommPkgRecvProc(comm_pkg, i);
if (proc == parent)
{
found = 1;
break;
}
}
/* but if all the edges connected to my parent are on my side, *
* I just pick one of them as the tree edge */
if (found == 0)
{
for (i = 0; i < nsends; i++)
{
proc = hypre_ParCSRCommPkgSendProc(comm_pkg, i);
if (proc == parent)
{
k = hypre_ParCSRCommPkgSendMapStart(comm_pkg,i);
edge = hypre_ParCSRCommPkgSendMapElmt(comm_pkg,k);
edges_marked[edge] = 1;
break;
}
}
}
/* next, if my processor has an edge incident to a node owned by *
* a child, put that edge on the tree. If there is no such edge, *
* the child is assumed to pick up an edge itself */
for (j = 0; j < n_children; j++)
{
proc = children[j];
for (i = 0; i < nsends; i++)
{
proc2 = hypre_ParCSRCommPkgSendProc(comm_pkg, i);
if (proc == proc2)
{
k = hypre_ParCSRCommPkgSendMapStart(comm_pkg,i);
edge = hypre_ParCSRCommPkgSendMapElmt(comm_pkg,k);
edges_marked[edge] = 1;
break;
}
}
}
if (n_children > 0)
{
hypre_TFree(children, HYPRE_MEMORY_HOST);
}
/* count the size of the tree */
tree_size = 0;
for (i = 0; i < ncols_G; i++)
if (edges_marked[i] == 1) tree_size++;
t_indices = hypre_TAlloc(HYPRE_Int, (tree_size+1) , HYPRE_MEMORY_HOST);
t_indices[0] = tree_size;
tree_size = 1;
for (i = 0; i < ncols_G; i++)
if (edges_marked[i] == 1) t_indices[tree_size++] = i;
(*indices) = t_indices;
hypre_TFree(edges_marked, HYPRE_MEMORY_HOST);
if (G_type != 0)
{
hypre_TFree(G_diag_i, HYPRE_MEMORY_HOST);
hypre_TFree(G_diag_j, HYPRE_MEMORY_HOST);
}
}
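/* Illustrative usage sketch, not part of hypre: consuming the output of
hypre_ParCSRMatrixGenSpanningTree above. The returned array stores the
number of tree edges in slot 0, followed by the edge numbers themselves.
The helper name is ours. */
static void example_spanning_tree_usage( hypre_ParCSRMatrix *G_csr )
{
HYPRE_Int *indices = NULL;
HYPRE_Int i, tree_size;
hypre_ParCSRMatrixGenSpanningTree(G_csr, &indices, 0);
tree_size = indices[0];
for (i = 1; i <= tree_size; i++)
{
hypre_printf("tree edge %d\n", indices[i]);
}
hypre_TFree(indices, HYPRE_MEMORY_HOST);
}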
/* -----------------------------------------------------------------------------
* extract submatrices based on given indices
* ----------------------------------------------------------------------------- */
void hypre_ParCSRMatrixExtractSubmatrices( hypre_ParCSRMatrix *A_csr,
HYPRE_Int *indices2,
hypre_ParCSRMatrix ***submatrices )
{
HYPRE_Int nrows_A, nindices, *indices, *A_diag_i, *A_diag_j, mypid, nprocs;
HYPRE_Int i, j, k, *proc_offsets1, *proc_offsets2, *exp_indices;
HYPRE_BigInt *itmp_array;
HYPRE_Int nnz11, nnz12, nnz21, nnz22, col, ncols_offd, nnz_offd, nnz_diag;
HYPRE_Int nrows, nnz;
HYPRE_BigInt global_nrows, global_ncols, *row_starts, *col_starts;
HYPRE_Int *diag_i, *diag_j, row, *offd_i;
HYPRE_Complex *A_diag_a, *diag_a;
hypre_ParCSRMatrix *A11_csr, *A12_csr, *A21_csr, *A22_csr;
hypre_CSRMatrix *A_diag, *diag, *offd;
MPI_Comm comm;
/* -----------------------------------------------------
* first make sure the incoming indices are in order
* ----------------------------------------------------- */
nindices = indices2[0];
indices = &(indices2[1]);
hypre_qsort0(indices, 0, nindices-1);
/* -----------------------------------------------------
* fetch matrix information
* ----------------------------------------------------- */
nrows_A = (HYPRE_Int) hypre_ParCSRMatrixGlobalNumRows(A_csr);
A_diag = hypre_ParCSRMatrixDiag(A_csr);
A_diag_i = hypre_CSRMatrixI(A_diag);
A_diag_j = hypre_CSRMatrixJ(A_diag);
A_diag_a = hypre_CSRMatrixData(A_diag);
comm = hypre_ParCSRMatrixComm(A_csr);
hypre_MPI_Comm_rank(comm, &mypid);
hypre_MPI_Comm_size(comm, &nprocs);
if (nprocs > 1)
{
hypre_error_w_msg(HYPRE_ERROR_GENERIC,"ExtractSubmatrices: cannot handle nprocs > 1 yet.\n");
exit(1);
}
/* -----------------------------------------------------
* compute new matrix dimensions
* ----------------------------------------------------- */
proc_offsets1 = hypre_TAlloc(HYPRE_Int, (nprocs+1) , HYPRE_MEMORY_HOST);
proc_offsets2 = hypre_TAlloc(HYPRE_Int, (nprocs+1) , HYPRE_MEMORY_HOST);
hypre_MPI_Allgather(&nindices, 1, HYPRE_MPI_INT, proc_offsets1, 1,
HYPRE_MPI_INT, comm);
k = 0;
for (i = 0; i < nprocs; i++)
{
j = proc_offsets1[i];
proc_offsets1[i] = k;
k += j;
}
proc_offsets1[nprocs] = k;
itmp_array = hypre_ParCSRMatrixRowStarts(A_csr);
for (i = 0; i <= nprocs; i++)
{
proc_offsets2[i] = (HYPRE_Int)(itmp_array[i] - proc_offsets1[i]);
}
/* -----------------------------------------------------
* assign id's to row and col for later processing
* ----------------------------------------------------- */
exp_indices = hypre_TAlloc(HYPRE_Int, nrows_A , HYPRE_MEMORY_HOST);
for (i = 0; i < nrows_A; i++) exp_indices[i] = -1;
for (i = 0; i < nindices; i++)
{
if (exp_indices[indices[i]] == -1) exp_indices[indices[i]] = i;
else
{
hypre_error_w_msg(HYPRE_ERROR_GENERIC,"ExtractSubmatrices: wrong index %d %d\n");
exit(1);
}
}
k = 0;
for (i = 0; i < nrows_A; i++)
{
if (exp_indices[i] < 0)
{
exp_indices[i] = - k - 1;
k++;
}
}
/* -----------------------------------------------------
* compute number of nonzeros for each block
* ----------------------------------------------------- */
nnz11 = nnz12 = nnz21 = nnz22 = 0;
for (i = 0; i < nrows_A; i++)
{
if (exp_indices[i] >= 0)
{
for (j = A_diag_i[i]; j < A_diag_i[i+1]; j++)
{
col = A_diag_j[j];
if (exp_indices[col] >= 0) nnz11++;
else nnz12++;
}
}
else
{
for (j = A_diag_i[i]; j < A_diag_i[i+1]; j++)
{
col = A_diag_j[j];
if (exp_indices[col] >= 0) nnz21++;
else nnz22++;
}
}
}
/* -----------------------------------------------------
* create A11 matrix (assume sequential for the moment)
* ----------------------------------------------------- */
ncols_offd = 0;
nnz_offd = 0;
nnz_diag = nnz11;
/* This case is not yet implemented! */
global_nrows = 0;
global_ncols = 0;
row_starts = NULL;
col_starts = NULL;
A11_csr = hypre_ParCSRMatrixCreate(comm, global_nrows, global_ncols,
row_starts, col_starts, ncols_offd,
nnz_diag, nnz_offd);
nrows = nindices;
diag_i = hypre_CTAlloc(HYPRE_Int, nrows+1, HYPRE_MEMORY_HOST);
diag_j = hypre_CTAlloc(HYPRE_Int, nnz_diag, HYPRE_MEMORY_HOST);
diag_a = hypre_CTAlloc(HYPRE_Complex, nnz_diag, HYPRE_MEMORY_HOST);
nnz = 0;
row = 0;
diag_i[0] = 0;
for (i = 0; i < nrows_A; i++)
{
if (exp_indices[i] >= 0)
{
for (j = A_diag_i[i]; j < A_diag_i[i+1]; j++)
{
col = A_diag_j[j];
if (exp_indices[col] >= 0)
{
diag_j[nnz] = exp_indices[col];
diag_a[nnz++] = A_diag_a[j];
}
}
row++;
diag_i[row] = nnz;
}
}
diag = hypre_ParCSRMatrixDiag(A11_csr);
hypre_CSRMatrixI(diag) = diag_i;
hypre_CSRMatrixJ(diag) = diag_j;
hypre_CSRMatrixData(diag) = diag_a;
offd_i = hypre_CTAlloc(HYPRE_Int, nrows+1, HYPRE_MEMORY_HOST);
for (i = 0; i <= nrows; i++) offd_i[i] = 0;
offd = hypre_ParCSRMatrixOffd(A11_csr);
hypre_CSRMatrixI(offd) = offd_i;
hypre_CSRMatrixJ(offd) = NULL;
hypre_CSRMatrixData(offd) = NULL;
/* -----------------------------------------------------
* create A12 matrix (assume sequential for the moment)
* ----------------------------------------------------- */
ncols_offd = 0;
nnz_offd = 0;
nnz_diag = nnz12;
global_nrows = (HYPRE_BigInt)proc_offsets1[nprocs];
global_ncols = (HYPRE_BigInt)proc_offsets2[nprocs];
row_starts = hypre_CTAlloc(HYPRE_BigInt, nprocs+1, HYPRE_MEMORY_HOST);
col_starts = hypre_CTAlloc(HYPRE_BigInt, nprocs+1, HYPRE_MEMORY_HOST);
for (i = 0; i <= nprocs; i++)
{
row_starts[i] = (HYPRE_BigInt)proc_offsets1[i];
col_starts[i] = (HYPRE_BigInt)proc_offsets2[i];
}
A12_csr = hypre_ParCSRMatrixCreate(comm, global_nrows, global_ncols,
row_starts, col_starts, ncols_offd,
nnz_diag, nnz_offd);
nrows = nindices;
diag_i = hypre_CTAlloc(HYPRE_Int, nrows+1, HYPRE_MEMORY_HOST);
diag_j = hypre_CTAlloc(HYPRE_Int, nnz_diag, HYPRE_MEMORY_HOST);
diag_a = hypre_CTAlloc(HYPRE_Complex, nnz_diag, HYPRE_MEMORY_HOST);
nnz = 0;
row = 0;
diag_i[0] = 0;
for (i = 0; i < nrows_A; i++)
{
if (exp_indices[i] >= 0)
{
for (j = A_diag_i[i]; j < A_diag_i[i+1]; j++)
{
col = A_diag_j[j];
if (exp_indices[col] < 0)
{
diag_j[nnz] = - exp_indices[col] - 1;
diag_a[nnz++] = A_diag_a[j];
}
}
row++;
diag_i[row] = nnz;
}
}
if (nnz > nnz_diag)
{
hypre_assert(0);
hypre_error(HYPRE_ERROR_GENERIC);
}
diag = hypre_ParCSRMatrixDiag(A12_csr);
hypre_CSRMatrixI(diag) = diag_i;
hypre_CSRMatrixJ(diag) = diag_j;
hypre_CSRMatrixData(diag) = diag_a;
offd_i = hypre_CTAlloc(HYPRE_Int, nrows+1, HYPRE_MEMORY_HOST);
for (i = 0; i <= nrows; i++) offd_i[i] = 0;
offd = hypre_ParCSRMatrixOffd(A12_csr);
hypre_CSRMatrixI(offd) = offd_i;
hypre_CSRMatrixJ(offd) = NULL;
hypre_CSRMatrixData(offd) = NULL;
/* -----------------------------------------------------
* create A21 matrix (assume sequential for the moment)
* ----------------------------------------------------- */
ncols_offd = 0;
nnz_offd = 0;
nnz_diag = nnz21;
global_nrows = (HYPRE_BigInt)proc_offsets2[nprocs];
global_ncols = (HYPRE_BigInt)proc_offsets1[nprocs];
row_starts = hypre_CTAlloc(HYPRE_BigInt, nprocs+1, HYPRE_MEMORY_HOST);
col_starts = hypre_CTAlloc(HYPRE_BigInt, nprocs+1, HYPRE_MEMORY_HOST);
for (i = 0; i <= nprocs; i++)
{
row_starts[i] = (HYPRE_BigInt)proc_offsets2[i];
col_starts[i] = (HYPRE_BigInt)proc_offsets1[i];
}
A21_csr = hypre_ParCSRMatrixCreate(comm, global_nrows, global_ncols,
row_starts, col_starts, ncols_offd,
nnz_diag, nnz_offd);
nrows = nrows_A - nindices;
diag_i = hypre_CTAlloc(HYPRE_Int, nrows+1, HYPRE_MEMORY_HOST);
diag_j = hypre_CTAlloc(HYPRE_Int, nnz_diag, HYPRE_MEMORY_HOST);
diag_a = hypre_CTAlloc(HYPRE_Complex, nnz_diag, HYPRE_MEMORY_HOST);
nnz = 0;
row = 0;
diag_i[0] = 0;
for (i = 0; i < nrows_A; i++)
{
if (exp_indices[i] < 0)
{
for (j = A_diag_i[i]; j < A_diag_i[i+1]; j++)
{
col = A_diag_j[j];
if (exp_indices[col] >= 0)
{
diag_j[nnz] = exp_indices[col];
diag_a[nnz++] = A_diag_a[j];
}
}
row++;
diag_i[row] = nnz;
}
}
diag = hypre_ParCSRMatrixDiag(A21_csr);
hypre_CSRMatrixI(diag) = diag_i;
hypre_CSRMatrixJ(diag) = diag_j;
hypre_CSRMatrixData(diag) = diag_a;
offd_i = hypre_CTAlloc(HYPRE_Int, nrows+1, HYPRE_MEMORY_HOST);
for (i = 0; i <= nrows; i++) offd_i[i] = 0;
offd = hypre_ParCSRMatrixOffd(A21_csr);
hypre_CSRMatrixI(offd) = offd_i;
hypre_CSRMatrixJ(offd) = NULL;
hypre_CSRMatrixData(offd) = NULL;
/* -----------------------------------------------------
* create A22 matrix (assume sequential for the moment)
* ----------------------------------------------------- */
ncols_offd = 0;
nnz_offd = 0;
nnz_diag = nnz22;
global_nrows = (HYPRE_BigInt)proc_offsets2[nprocs];
global_ncols = (HYPRE_BigInt)proc_offsets2[nprocs];
row_starts = hypre_CTAlloc(HYPRE_BigInt, nprocs+1, HYPRE_MEMORY_HOST);
col_starts = hypre_CTAlloc(HYPRE_BigInt, nprocs+1, HYPRE_MEMORY_HOST);
for (i = 0; i <= nprocs; i++)
{
row_starts[i] = (HYPRE_BigInt)proc_offsets2[i];
col_starts[i] = (HYPRE_BigInt)proc_offsets2[i];
}
A22_csr = hypre_ParCSRMatrixCreate(comm, global_nrows, global_ncols,
row_starts, col_starts, ncols_offd,
nnz_diag, nnz_offd);
nrows = nrows_A - nindices;
diag_i = hypre_CTAlloc(HYPRE_Int, nrows+1, HYPRE_MEMORY_HOST);
diag_j = hypre_CTAlloc(HYPRE_Int, nnz_diag, HYPRE_MEMORY_HOST);
diag_a = hypre_CTAlloc(HYPRE_Complex, nnz_diag, HYPRE_MEMORY_HOST);
nnz = 0;
row = 0;
diag_i[0] = 0;
for (i = 0; i < nrows_A; i++)
{
if (exp_indices[i] < 0)
{
for (j = A_diag_i[i]; j < A_diag_i[i+1]; j++)
{
col = A_diag_j[j];
if (exp_indices[col] < 0)
{
diag_j[nnz] = - exp_indices[col] - 1;
diag_a[nnz++] = A_diag_a[j];
}
}
row++;
diag_i[row] = nnz;
}
}
diag = hypre_ParCSRMatrixDiag(A22_csr);
hypre_CSRMatrixI(diag) = diag_i;
hypre_CSRMatrixJ(diag) = diag_j;
hypre_CSRMatrixData(diag) = diag_a;
offd_i = hypre_CTAlloc(HYPRE_Int, nrows+1, HYPRE_MEMORY_HOST);
for (i = 0; i <= nrows; i++) offd_i[i] = 0;
offd = hypre_ParCSRMatrixOffd(A22_csr);
hypre_CSRMatrixI(offd) = offd_i;
hypre_CSRMatrixJ(offd) = NULL;
hypre_CSRMatrixData(offd) = NULL;
/* -----------------------------------------------------
* hand the matrices back to the caller and clean up
* ----------------------------------------------------- */
(*submatrices)[0] = A11_csr;
(*submatrices)[1] = A12_csr;
(*submatrices)[2] = A21_csr;
(*submatrices)[3] = A22_csr;
hypre_TFree(proc_offsets1, HYPRE_MEMORY_HOST);
hypre_TFree(proc_offsets2, HYPRE_MEMORY_HOST);
hypre_TFree(exp_indices, HYPRE_MEMORY_HOST);
}
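/* Illustrative usage sketch, not part of hypre: calling the extraction
routine above. The caller supplies the 4-slot array that receives the
blocks and packs the indices with their count in slot 0. Helper names
are ours. */
static void example_extract_blocks( hypre_ParCSRMatrix *A_csr,
HYPRE_Int nidx, HYPRE_Int *idx )
{
HYPRE_Int i;
HYPRE_Int *indices2 = hypre_TAlloc(HYPRE_Int, nidx+1, HYPRE_MEMORY_HOST);
hypre_ParCSRMatrix **blocks =
hypre_TAlloc(hypre_ParCSRMatrix *, 4, HYPRE_MEMORY_HOST);
indices2[0] = nidx;
for (i = 0; i < nidx; i++) indices2[i+1] = idx[i];
hypre_ParCSRMatrixExtractSubmatrices(A_csr, indices2, &blocks);
/* blocks[0..3] now hold A11, A12, A21, A22; use them, then destroy
them and free the array */
hypre_TFree(indices2, HYPRE_MEMORY_HOST);
}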
/* -----------------------------------------------------------------------------
* extract submatrices of a rectangular matrix
* ----------------------------------------------------------------------------- */
void hypre_ParCSRMatrixExtractRowSubmatrices( hypre_ParCSRMatrix *A_csr,
HYPRE_Int *indices2,
hypre_ParCSRMatrix ***submatrices )
{
HYPRE_Int nrows_A, nindices, *indices, *A_diag_i, *A_diag_j, mypid, nprocs;
HYPRE_Int i, j, k, *proc_offsets1, *proc_offsets2, *exp_indices;
HYPRE_Int nnz11, nnz21, col, ncols_offd, nnz_offd, nnz_diag;
HYPRE_Int *A_offd_i, *A_offd_j;
HYPRE_Int nrows, nnz;
HYPRE_BigInt global_nrows, global_ncols, *row_starts, *col_starts, *itmp_array;
HYPRE_Int *diag_i, *diag_j, row, *offd_i, *offd_j, nnz11_offd, nnz21_offd;
HYPRE_Complex *A_diag_a, *A_offd_a, *diag_a, *offd_a;
hypre_ParCSRMatrix *A11_csr, *A21_csr;
hypre_CSRMatrix *A_diag, *diag, *A_offd, *offd;
MPI_Comm comm;
/* -----------------------------------------------------
* first make sure the incoming indices are in order
* ----------------------------------------------------- */
nindices = indices2[0];
indices = &(indices2[1]);
hypre_qsort0(indices, 0, nindices-1);
/* -----------------------------------------------------
* fetch matrix information
* ----------------------------------------------------- */
nrows_A = (HYPRE_Int)hypre_ParCSRMatrixGlobalNumRows(A_csr);
A_diag = hypre_ParCSRMatrixDiag(A_csr);
A_diag_i = hypre_CSRMatrixI(A_diag);
A_diag_j = hypre_CSRMatrixJ(A_diag);
A_diag_a = hypre_CSRMatrixData(A_diag);
A_offd = hypre_ParCSRMatrixOffd(A_csr);
A_offd_i = hypre_CSRMatrixI(A_offd);
A_offd_j = hypre_CSRMatrixJ(A_offd);
A_offd_a = hypre_CSRMatrixData(A_offd);
comm = hypre_ParCSRMatrixComm(A_csr);
hypre_MPI_Comm_rank(comm, &mypid);
hypre_MPI_Comm_size(comm, &nprocs);
/* -----------------------------------------------------
* compute new matrix dimensions
* ----------------------------------------------------- */
proc_offsets1 = hypre_TAlloc(HYPRE_Int, (nprocs+1) , HYPRE_MEMORY_HOST);
proc_offsets2 = hypre_TAlloc(HYPRE_Int, (nprocs+1) , HYPRE_MEMORY_HOST);
hypre_MPI_Allgather(&nindices, 1, HYPRE_MPI_INT, proc_offsets1, 1,
HYPRE_MPI_INT, comm);
k = 0;
for (i = 0; i < nprocs; i++)
{
j = proc_offsets1[i];
proc_offsets1[i] = k;
k += j;
}
proc_offsets1[nprocs] = k;
itmp_array = hypre_ParCSRMatrixRowStarts(A_csr);
for (i = 0; i <= nprocs; i++)
proc_offsets2[i] = (HYPRE_Int)(itmp_array[i] - proc_offsets1[i]);
/* -----------------------------------------------------
* assign id's to row and col for later processing
* ----------------------------------------------------- */
exp_indices = hypre_TAlloc(HYPRE_Int, nrows_A , HYPRE_MEMORY_HOST);
for (i = 0; i < nrows_A; i++) exp_indices[i] = -1;
for (i = 0; i < nindices; i++)
{
if (exp_indices[indices[i]] == -1) exp_indices[indices[i]] = i;
else
{
hypre_error_w_msg(HYPRE_ERROR_GENERIC,"ExtractRowSubmatrices: wrong index %d %d\n");
exit(1);
}
}
k = 0;
for (i = 0; i < nrows_A; i++)
{
if (exp_indices[i] < 0)
{
exp_indices[i] = - k - 1;
k++;
}
}
/* -----------------------------------------------------
* compute number of nonzeros for each block
* ----------------------------------------------------- */
nnz11 = nnz21 = nnz11_offd = nnz21_offd = 0;
for (i = 0; i < nrows_A; i++)
{
if (exp_indices[i] >= 0)
{
for (j = A_diag_i[i]; j < A_diag_i[i+1]; j++)
{
col = A_diag_j[j];
if (exp_indices[col] >= 0) nnz11++;
}
nnz11_offd += A_offd_i[i+1] - A_offd_i[i];
}
else
{
for (j = A_diag_i[i]; j < A_diag_i[i+1]; j++)
{
col = A_diag_j[j];
if (exp_indices[col] < 0) nnz21++;
}
nnz21_offd += A_offd_i[i+1] - A_offd_i[i];
}
}
/* -----------------------------------------------------
* create A11 matrix (assume sequential for the moment)
* ----------------------------------------------------- */
ncols_offd = hypre_CSRMatrixNumCols(A_offd);
nnz_diag = nnz11;
nnz_offd = nnz11_offd;
global_nrows = (HYPRE_BigInt)proc_offsets1[nprocs];
itmp_array = hypre_ParCSRMatrixColStarts(A_csr);
global_ncols = itmp_array[nprocs];
row_starts = hypre_CTAlloc(HYPRE_BigInt, nprocs+1, HYPRE_MEMORY_HOST);
col_starts = hypre_CTAlloc(HYPRE_BigInt, nprocs+1, HYPRE_MEMORY_HOST);
for (i = 0; i <= nprocs; i++)
{
row_starts[i] = (HYPRE_BigInt)proc_offsets1[i];
col_starts[i] = itmp_array[i];
}
A11_csr = hypre_ParCSRMatrixCreate(comm, global_nrows, global_ncols,
row_starts, col_starts, ncols_offd,
nnz_diag, nnz_offd);
nrows = nindices;
diag_i = hypre_CTAlloc(HYPRE_Int, nrows+1, HYPRE_MEMORY_HOST);
diag_j = hypre_CTAlloc(HYPRE_Int, nnz_diag, HYPRE_MEMORY_HOST);
diag_a = hypre_CTAlloc(HYPRE_Complex, nnz_diag, HYPRE_MEMORY_HOST);
nnz = 0;
row = 0;
diag_i[0] = 0;
for (i = 0; i < nrows_A; i++)
{
if (exp_indices[i] >= 0)
{
for (j = A_diag_i[i]; j < A_diag_i[i+1]; j++)
{
col = A_diag_j[j];
if (exp_indices[col] >= 0)
{
diag_j[nnz] = exp_indices[col];
diag_a[nnz++] = A_diag_a[j];
}
}
row++;
diag_i[row] = nnz;
}
}
diag = hypre_ParCSRMatrixDiag(A11_csr);
hypre_CSRMatrixI(diag) = diag_i;
hypre_CSRMatrixJ(diag) = diag_j;
hypre_CSRMatrixData(diag) = diag_a;
offd_i = hypre_CTAlloc(HYPRE_Int, nrows+1, HYPRE_MEMORY_HOST);
offd_j = hypre_CTAlloc(HYPRE_Int, nnz_offd, HYPRE_MEMORY_HOST);
offd_a = hypre_CTAlloc(HYPRE_Complex, nnz_offd, HYPRE_MEMORY_HOST);
nnz = 0;
row = 0;
offd_i[0] = 0;
for (i = 0; i < nrows_A; i++)
{
if (exp_indices[i] >= 0)
{
for (j = A_offd_i[i]; j < A_offd_i[i+1]; j++)
{
offd_j[nnz] = A_offd_j[j];
offd_a[nnz++] = A_offd_a[j];
}
row++;
offd_i[row] = nnz;
}
}
offd = hypre_ParCSRMatrixOffd(A11_csr);
hypre_CSRMatrixI(offd) = offd_i;
hypre_CSRMatrixJ(offd) = offd_j;
hypre_CSRMatrixData(offd) = offd_a;
/* -----------------------------------------------------
* create A21 matrix
* ----------------------------------------------------- */
ncols_offd = hypre_CSRMatrixNumCols(A_offd);
nnz_offd = nnz21_offd;
nnz_diag = nnz21;
global_nrows = (HYPRE_BigInt)proc_offsets2[nprocs];
itmp_array = hypre_ParCSRMatrixColStarts(A_csr);
global_ncols = itmp_array[nprocs];
row_starts = hypre_CTAlloc(HYPRE_BigInt, nprocs+1, HYPRE_MEMORY_HOST);
col_starts = hypre_CTAlloc(HYPRE_BigInt, nprocs+1, HYPRE_MEMORY_HOST);
for (i = 0; i <= nprocs; i++)
{
row_starts[i] = (HYPRE_BigInt)proc_offsets2[i];
col_starts[i] = itmp_array[i];
}
A21_csr = hypre_ParCSRMatrixCreate(comm, global_nrows, global_ncols,
row_starts, col_starts, ncols_offd,
nnz_diag, nnz_offd);
nrows = nrows_A - nindices;
diag_i = hypre_CTAlloc(HYPRE_Int, nrows+1, HYPRE_MEMORY_HOST);
diag_j = hypre_CTAlloc(HYPRE_Int, nnz_diag, HYPRE_MEMORY_HOST);
diag_a = hypre_CTAlloc(HYPRE_Complex, nnz_diag, HYPRE_MEMORY_HOST);
nnz = 0;
row = 0;
diag_i[0] = 0;
for (i = 0; i < nrows_A; i++)
{
if (exp_indices[i] < 0)
{
for (j = A_diag_i[i]; j < A_diag_i[i+1]; j++)
{
diag_j[nnz] = A_diag_j[j];
diag_a[nnz++] = A_diag_a[j];
}
row++;
diag_i[row] = nnz;
}
}
diag = hypre_ParCSRMatrixDiag(A21_csr);
hypre_CSRMatrixI(diag) = diag_i;
hypre_CSRMatrixJ(diag) = diag_j;
hypre_CSRMatrixData(diag) = diag_a;
offd_i = hypre_CTAlloc(HYPRE_Int, nrows+1, HYPRE_MEMORY_HOST);
offd_j = hypre_CTAlloc(HYPRE_Int, nnz_offd, HYPRE_MEMORY_HOST);
offd_a = hypre_CTAlloc(HYPRE_Complex, nnz_offd, HYPRE_MEMORY_HOST);
nnz = 0;
row = 0;
offd_i[0] = 0;
for (i = 0; i < nrows_A; i++)
{
if (exp_indices[i] < 0)
{
for (j = A_offd_i[i]; j < A_offd_i[i+1]; j++)
{
offd_j[nnz] = A_offd_j[j];
offd_a[nnz++] = A_offd_a[j];
}
row++;
offd_i[row] = nnz;
}
}
offd = hypre_ParCSRMatrixOffd(A21_csr);
hypre_CSRMatrixI(offd) = offd_i;
hypre_CSRMatrixJ(offd) = offd_j;
hypre_CSRMatrixData(offd) = offd_a;
/* -----------------------------------------------------
* hand the matrices back to the caller and clean up
* ----------------------------------------------------- */
(*submatrices)[0] = A11_csr;
(*submatrices)[1] = A21_csr;
hypre_TFree(proc_offsets1, HYPRE_MEMORY_HOST);
hypre_TFree(proc_offsets2, HYPRE_MEMORY_HOST);
hypre_TFree(exp_indices, HYPRE_MEMORY_HOST);
}
/* -----------------------------------------------------------------------------
* return the sum of all local elements of the matrix
* ----------------------------------------------------------------------------- */
HYPRE_Complex hypre_ParCSRMatrixLocalSumElts( hypre_ParCSRMatrix * A )
{
hypre_CSRMatrix * A_diag = hypre_ParCSRMatrixDiag( A );
hypre_CSRMatrix * A_offd = hypre_ParCSRMatrixOffd( A );
return hypre_CSRMatrixSumElts(A_diag) + hypre_CSRMatrixSumElts(A_offd);
}
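/* Illustrative sketch, not part of hypre: reducing the local sums above
to a global sum over all ranks with the usual hypre MPI wrappers; the
helper name is ours. */
static HYPRE_Complex example_global_sum_elts( hypre_ParCSRMatrix *A )
{
HYPRE_Complex local_sum = hypre_ParCSRMatrixLocalSumElts(A);
HYPRE_Complex global_sum = 0.0;
hypre_MPI_Allreduce(&local_sum, &global_sum, 1, HYPRE_MPI_COMPLEX,
hypre_MPI_SUM, hypre_ParCSRMatrixComm(A));
return global_sum;
}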
/*--------------------------------------------------------------------------
* hypre_ParCSRMatrixAminvDB
* computes C = (A - inv(D)B) where D is a diagonal matrix
* Note: Data structure of A is expected to be a subset of data structure of B!
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_ParCSRMatrixAminvDB( hypre_ParCSRMatrix *A,
hypre_ParCSRMatrix *B,
HYPRE_Complex *d,
hypre_ParCSRMatrix **C_ptr)
{
MPI_Comm comm = hypre_ParCSRMatrixComm(B);
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
hypre_ParCSRMatrix *C = NULL;
HYPRE_Int num_cols_offd_A = hypre_CSRMatrixNumCols(A_offd);
hypre_ParCSRCommPkg *comm_pkg_B = hypre_ParCSRMatrixCommPkg(B);
hypre_CSRMatrix *B_diag = hypre_ParCSRMatrixDiag(B);
hypre_CSRMatrix *B_offd = hypre_ParCSRMatrixOffd(B);
HYPRE_Int num_cols_offd_B = hypre_CSRMatrixNumCols(B_offd);
HYPRE_Int num_sends_B, num_recvs_B;
HYPRE_Int i, j, cnt;
HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
HYPRE_Complex *A_diag_data = hypre_CSRMatrixData(A_diag);
HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
HYPRE_Complex *A_offd_data = hypre_CSRMatrixData(A_offd);
HYPRE_BigInt *col_map_offd_A = hypre_ParCSRMatrixColMapOffd(A);
HYPRE_Int num_rows = hypre_CSRMatrixNumRows(B_diag);
HYPRE_Int *B_diag_i = hypre_CSRMatrixI(B_diag);
HYPRE_Int *B_diag_j = hypre_CSRMatrixJ(B_diag);
HYPRE_Complex *B_diag_data = hypre_CSRMatrixData(B_diag);
HYPRE_Int *B_offd_i = hypre_CSRMatrixI(B_offd);
HYPRE_Int *B_offd_j = hypre_CSRMatrixJ(B_offd);
HYPRE_Complex *B_offd_data = hypre_CSRMatrixData(B_offd);
HYPRE_BigInt *col_map_offd_B = hypre_ParCSRMatrixColMapOffd(B);
hypre_CSRMatrix *C_diag = NULL;
hypre_CSRMatrix *C_offd = NULL;
HYPRE_Int *C_diag_i = NULL;
HYPRE_Int *C_diag_j = NULL;
HYPRE_Complex *C_diag_data = NULL;
HYPRE_Int *C_offd_i = NULL;
HYPRE_Int *C_offd_j = NULL;
HYPRE_Complex *C_offd_data = NULL;
HYPRE_Int num_procs, my_id;
HYPRE_Int *recv_procs_B;
HYPRE_Int *send_procs_B;
HYPRE_Int *recv_vec_starts_B;
HYPRE_Int *send_map_starts_B;
HYPRE_Int *send_map_elmts_B;
hypre_ParCSRCommPkg *comm_pkg_C;
HYPRE_Int *recv_procs_C;
HYPRE_Int *send_procs_C;
HYPRE_Int *recv_vec_starts_C;
HYPRE_Int *send_map_starts_C;
HYPRE_Int *send_map_elmts_C;
HYPRE_Int *map_to_B;
/*HYPRE_Int *C_diag_array;
HYPRE_Int *C_offd_array;*/
HYPRE_Complex *D_tmp;
HYPRE_Int size, rest, num_threads, ii;
hypre_MPI_Comm_size(comm,&num_procs);
hypre_MPI_Comm_rank(comm,&my_id);
num_threads = hypre_NumThreads();
/*C_diag_array = hypre_CTAlloc(HYPRE_Int, num_threads);
C_offd_array = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);*/
/*---------------------------------------------------------------------
* If there exists no CommPkg for B, a CommPkg is generated
*--------------------------------------------------------------------*/
if (!comm_pkg_B)
{
hypre_MatvecCommPkgCreate(B);
comm_pkg_B = hypre_ParCSRMatrixCommPkg(B);
}
C = hypre_ParCSRMatrixClone(B, 0);
/*hypre_ParCSRMatrixInitialize(C);*/
C_diag = hypre_ParCSRMatrixDiag(C);
C_diag_i = hypre_CSRMatrixI(C_diag);
C_diag_j = hypre_CSRMatrixJ(C_diag);
C_diag_data = hypre_CSRMatrixData(C_diag);
C_offd = hypre_ParCSRMatrixOffd(C);
C_offd_i = hypre_CSRMatrixI(C_offd);
C_offd_j = hypre_CSRMatrixJ(C_offd);
C_offd_data = hypre_CSRMatrixData(C_offd);
size = num_rows/num_threads;
rest = num_rows - size*num_threads;
D_tmp = hypre_CTAlloc(HYPRE_Complex, num_rows, HYPRE_MEMORY_HOST);
if (num_cols_offd_A)
{
map_to_B = hypre_CTAlloc(HYPRE_Int, num_cols_offd_A, HYPRE_MEMORY_HOST);
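/* col_map_offd_A and col_map_offd_B are both sorted and A's map is
contained in B's, so one forward sweep matches every column */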
cnt = 0;
for (i=0; i < num_cols_offd_A; i++)
{
while (col_map_offd_B[cnt] < col_map_offd_A[i])
{
cnt++;
}
map_to_B[i] = cnt;
cnt++;
}
}
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(ii, i, j)
#endif
for (ii=0; ii < num_threads; ii++)
{
HYPRE_Int *A_marker = NULL;
HYPRE_Int ns, ne, A_col, num_cols, nmax;
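/* split the rows into contiguous blocks; the first 'rest' threads
get one extra row each */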
if (ii < rest)
{
ns = ii*size+ii;
ne = (ii+1)*size+ii+1;
}
else
{
ns = ii*size+rest;
ne = (ii+1)*size+rest;
}
nmax = hypre_max(num_rows, num_cols_offd_B);
A_marker = hypre_CTAlloc(HYPRE_Int, nmax, HYPRE_MEMORY_HOST);
for (i=0; i < num_rows; i++)
{
A_marker[i] = -1;
}
for (i = ns; i < ne; i++)
{
D_tmp[i] = 1.0/d[i];
}
num_cols = C_diag_i[ns];
for (i = ns; i < ne; i++)
{
for (j = A_diag_i[i]; j < A_diag_i[i+1]; j++)
{
A_col = A_diag_j[j];
if (A_marker[A_col] < C_diag_i[i])
{
A_marker[A_col] = num_cols;
C_diag_j[num_cols] = A_col;
C_diag_data[num_cols] = A_diag_data[j];
num_cols++;
}
else
{
C_diag_data[A_marker[A_col]] += A_diag_data[j];
}
}
for (j = B_diag_i[i]; j < B_diag_i[i+1]; j++)
{
A_col = B_diag_j[j];
if (A_marker[A_col] < C_diag_i[i])
{
A_marker[A_col] = num_cols;
C_diag_j[num_cols] = A_col;
C_diag_data[num_cols] = -D_tmp[i]*B_diag_data[j];
num_cols++;
}
else
{
C_diag_data[A_marker[A_col]] -= D_tmp[i]*B_diag_data[j];
}
}
}
for (i = 0; i < num_cols_offd_B; i++)
{
A_marker[i] = -1;
}
num_cols = C_offd_i[ns];
for (i = ns; i < ne; i++)
{
for (j = A_offd_i[i]; j < A_offd_i[i+1]; j++)
{
A_col = map_to_B[A_offd_j[j]];
if (A_marker[A_col] < B_offd_i[i])
{
A_marker[A_col] = num_cols;
C_offd_j[num_cols] = A_col;
C_offd_data[num_cols] = A_offd_data[j];
num_cols++;
}
else
{
C_offd_data[A_marker[A_col]] += A_offd_data[j];
}
}
for (j = B_offd_i[i]; j < B_offd_i[i+1]; j++)
{
A_col = B_offd_j[j];
if (A_marker[A_col] < B_offd_i[i])
{
A_marker[A_col] = num_cols;
C_offd_j[num_cols] = A_col;
C_offd_data[num_cols] = -D_tmp[i]*B_offd_data[j];
num_cols++;
}
else
{
C_offd_data[A_marker[A_col]] -= D_tmp[i]*B_offd_data[j];
}
}
}
hypre_TFree(A_marker, HYPRE_MEMORY_HOST);
} /* end parallel region */
/*for (i=0; i < num_cols_offd_B; i++)
col_map_offd_C[i] = col_map_offd_B[i]; */
num_sends_B = hypre_ParCSRCommPkgNumSends(comm_pkg_B);
num_recvs_B = hypre_ParCSRCommPkgNumRecvs(comm_pkg_B);
recv_procs_B = hypre_ParCSRCommPkgRecvProcs(comm_pkg_B);
recv_vec_starts_B = hypre_ParCSRCommPkgRecvVecStarts(comm_pkg_B);
send_procs_B = hypre_ParCSRCommPkgSendProcs(comm_pkg_B);
send_map_starts_B = hypre_ParCSRCommPkgSendMapStarts(comm_pkg_B);
send_map_elmts_B = hypre_ParCSRCommPkgSendMapElmts(comm_pkg_B);
recv_procs_C = hypre_CTAlloc(HYPRE_Int, num_recvs_B, HYPRE_MEMORY_HOST);
recv_vec_starts_C = hypre_CTAlloc(HYPRE_Int, num_recvs_B+1, HYPRE_MEMORY_HOST);
send_procs_C = hypre_CTAlloc(HYPRE_Int, num_sends_B, HYPRE_MEMORY_HOST);
send_map_starts_C = hypre_CTAlloc(HYPRE_Int, num_sends_B+1, HYPRE_MEMORY_HOST);
send_map_elmts_C = hypre_CTAlloc(HYPRE_Int, send_map_starts_B[num_sends_B], HYPRE_MEMORY_HOST);
for (i=0; i < num_recvs_B; i++)
recv_procs_C[i] = recv_procs_B[i];
for (i=0; i < num_recvs_B+1; i++)
recv_vec_starts_C[i] = recv_vec_starts_B[i];
for (i=0; i < num_sends_B; i++)
send_procs_C[i] = send_procs_B[i];
for (i=0; i < num_sends_B+1; i++)
send_map_starts_C[i] = send_map_starts_B[i];
for (i=0; i < send_map_starts_B[num_sends_B]; i++)
send_map_elmts_C[i] = send_map_elmts_B[i];
comm_pkg_C = hypre_CTAlloc(hypre_ParCSRCommPkg, 1, HYPRE_MEMORY_HOST);
hypre_ParCSRCommPkgComm(comm_pkg_C) = comm;
hypre_ParCSRCommPkgNumRecvs(comm_pkg_C) = num_recvs_B;
hypre_ParCSRCommPkgRecvProcs(comm_pkg_C) = recv_procs_C;
hypre_ParCSRCommPkgRecvVecStarts(comm_pkg_C) = recv_vec_starts_C;
hypre_ParCSRCommPkgNumSends(comm_pkg_C) = num_sends_B;
hypre_ParCSRCommPkgSendProcs(comm_pkg_C) = send_procs_C;
hypre_ParCSRCommPkgSendMapStarts(comm_pkg_C) = send_map_starts_C;
hypre_ParCSRCommPkgSendMapElmts(comm_pkg_C) = send_map_elmts_C;
hypre_ParCSRMatrixCommPkg(C) = comm_pkg_C;
hypre_TFree(D_tmp, HYPRE_MEMORY_HOST);
if (num_cols_offd_A) hypre_TFree(map_to_B, HYPRE_MEMORY_HOST);
*C_ptr = C;
return (hypre_error_flag);
}
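/* Illustrative usage sketch, not part of hypre: forming C = A - inv(D)*B
with the routine above; d holds one diagonal entry of D per local row of
B, and A's sparsity pattern must be a subset of B's. The helper name is
ours. */
static void example_aminvdb_usage( hypre_ParCSRMatrix *A,
hypre_ParCSRMatrix *B,
HYPRE_Complex *d )
{
hypre_ParCSRMatrix *C = NULL;
hypre_ParCSRMatrixAminvDB(A, B, d, &C);
/* ... use C ... */
hypre_ParCSRMatrixDestroy(C);
}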
/*--------------------------------------------------------------------------
* hypre_ParTMatmul:
*
* Multiplies two ParCSRMatrices transpose(A) and B and returns
* the product in ParCSRMatrix C
*
* Note that C does not own the partitionings since its row_starts
* is owned by A and col_starts by B.
*--------------------------------------------------------------------------*/
hypre_ParCSRMatrix*
hypre_ParTMatmul( hypre_ParCSRMatrix *A,
hypre_ParCSRMatrix *B)
{
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
hypre_ParCSRCommPkg *comm_pkg_A = hypre_ParCSRMatrixCommPkg(A);
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
hypre_CSRMatrix *AT_diag = NULL;
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
hypre_CSRMatrix *AT_offd = NULL;
HYPRE_Int num_rows_diag_A = hypre_CSRMatrixNumRows(A_diag);
HYPRE_Int num_cols_diag_A = hypre_CSRMatrixNumCols(A_diag);
hypre_CSRMatrix *B_diag = hypre_ParCSRMatrixDiag(B);
hypre_CSRMatrix *B_offd = hypre_ParCSRMatrixOffd(B);
HYPRE_BigInt *col_map_offd_B = hypre_ParCSRMatrixColMapOffd(B);
HYPRE_BigInt first_col_diag_B = hypre_ParCSRMatrixFirstColDiag(B);
HYPRE_BigInt *col_starts_A = hypre_ParCSRMatrixColStarts(A);
HYPRE_BigInt *col_starts_B = hypre_ParCSRMatrixColStarts(B);
HYPRE_Int num_rows_diag_B = hypre_CSRMatrixNumRows(B_diag);
HYPRE_Int num_cols_diag_B = hypre_CSRMatrixNumCols(B_diag);
HYPRE_Int num_cols_offd_B = hypre_CSRMatrixNumCols(B_offd);
hypre_ParCSRMatrix *C;
HYPRE_BigInt *col_map_offd_C = NULL;
HYPRE_Int *map_B_to_C;
hypre_CSRMatrix *C_diag = NULL;
hypre_CSRMatrix *C_tmp_diag = NULL;
HYPRE_Complex *C_diag_data = NULL;
HYPRE_Int *C_diag_i = NULL;
HYPRE_Int *C_diag_j = NULL;
HYPRE_BigInt first_col_diag_C;
HYPRE_BigInt last_col_diag_C;
hypre_CSRMatrix *C_offd = NULL;
hypre_CSRMatrix *C_tmp_offd = NULL;
hypre_CSRMatrix *C_int = NULL;
hypre_CSRMatrix *C_ext = NULL;
HYPRE_Int *C_ext_i;
HYPRE_BigInt *C_ext_j;
HYPRE_Complex *C_ext_data;
HYPRE_Int *C_ext_diag_i;
HYPRE_Int *C_ext_diag_j;
HYPRE_Complex *C_ext_diag_data;
HYPRE_Int *C_ext_offd_i;
HYPRE_Int *C_ext_offd_j;
HYPRE_Complex *C_ext_offd_data;
HYPRE_Int C_ext_size = 0;
HYPRE_Int C_ext_diag_size = 0;
HYPRE_Int C_ext_offd_size = 0;
HYPRE_Int *C_tmp_diag_i;
HYPRE_Int *C_tmp_diag_j;
HYPRE_Complex *C_tmp_diag_data;
HYPRE_Int *C_tmp_offd_i;
HYPRE_Int *C_tmp_offd_j;
HYPRE_Complex *C_tmp_offd_data;
HYPRE_Complex *C_offd_data=NULL;
HYPRE_Int *C_offd_i=NULL;
HYPRE_Int *C_offd_j=NULL;
HYPRE_BigInt *temp;
HYPRE_Int *send_map_starts_A;
HYPRE_Int *send_map_elmts_A;
HYPRE_Int num_sends_A;
HYPRE_Int num_cols_offd_C = 0;
HYPRE_Int *P_marker;
HYPRE_Int i, j;
HYPRE_Int i1, j_indx;
HYPRE_BigInt nrows_A, ncols_A;
HYPRE_BigInt nrows_B, ncols_B;
/*HYPRE_Int allsquare = 0;*/
HYPRE_Int cnt, cnt_offd, cnt_diag;
HYPRE_BigInt value;
HYPRE_Int num_procs, my_id;
HYPRE_Int max_num_threads;
HYPRE_Int *C_diag_array = NULL;
HYPRE_Int *C_offd_array = NULL;
HYPRE_BigInt first_row_index, first_col_diag;
HYPRE_Int local_num_rows, local_num_cols;
nrows_A = hypre_ParCSRMatrixGlobalNumRows(A);
ncols_A = hypre_ParCSRMatrixGlobalNumCols(A);
nrows_B = hypre_ParCSRMatrixGlobalNumRows(B);
ncols_B = hypre_ParCSRMatrixGlobalNumCols(B);
hypre_MPI_Comm_size(comm,&num_procs);
hypre_MPI_Comm_rank(comm, &my_id);
max_num_threads = hypre_NumThreads();
if (nrows_A != nrows_B || num_rows_diag_A != num_rows_diag_B)
{
hypre_error_w_msg(HYPRE_ERROR_GENERIC," Error! Incompatible matrix dimensions!\n");
return NULL;
}
HYPRE_MemoryLocation memory_location_A = hypre_ParCSRMatrixMemoryLocation(A);
HYPRE_MemoryLocation memory_location_B = hypre_ParCSRMatrixMemoryLocation(B);
/* RL: TODO cannot guarantee, maybe should never assert
hypre_assert(memory_location_A == memory_location_B);
*/
/* RL: in the case of A=H, B=D, or A=D, B=H, let C = D,
* not sure if this is the right thing to do.
* Also, need something like this in other places
* TODO */
HYPRE_MemoryLocation memory_location_C = hypre_max(memory_location_A, memory_location_B);
/*if (num_cols_diag_A == num_cols_diag_B) allsquare = 1;*/
hypre_CSRMatrixTranspose(A_diag, &AT_diag, 1);
hypre_CSRMatrixTranspose(A_offd, &AT_offd, 1);
C_tmp_diag = hypre_CSRMatrixMultiply(AT_diag, B_diag);
C_ext_size = 0;
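/* the parts of the product that involve A's off-diagonal block belong to
rows owned by other processors; they are computed below, merged into
C_int and exchanged so that each processor receives its share in C_ext */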
if (num_procs > 1)
{
hypre_CSRMatrix *C_int_diag;
hypre_CSRMatrix *C_int_offd;
void *request;
C_tmp_offd = hypre_CSRMatrixMultiply(AT_diag, B_offd);
C_int_diag = hypre_CSRMatrixMultiply(AT_offd, B_diag);
C_int_offd = hypre_CSRMatrixMultiply(AT_offd, B_offd);
hypre_ParCSRMatrixDiag(B) = C_int_diag;
hypre_ParCSRMatrixOffd(B) = C_int_offd;
C_int = hypre_MergeDiagAndOffd(B);
hypre_ParCSRMatrixDiag(B) = B_diag;
hypre_ParCSRMatrixOffd(B) = B_offd;
hypre_ExchangeExternalRowsInit(C_int, comm_pkg_A, &request);
C_ext = hypre_ExchangeExternalRowsWait(request);
C_ext_i = hypre_CSRMatrixI(C_ext);
C_ext_j = hypre_CSRMatrixBigJ(C_ext);
C_ext_data = hypre_CSRMatrixData(C_ext);
C_ext_size = C_ext_i[hypre_CSRMatrixNumRows(C_ext)];
hypre_CSRMatrixDestroy(C_int);
hypre_CSRMatrixDestroy(C_int_diag);
hypre_CSRMatrixDestroy(C_int_offd);
}
else
{
C_tmp_offd = hypre_CSRMatrixCreate(num_cols_diag_A, 0, 0);
hypre_CSRMatrixInitialize(C_tmp_offd);
hypre_CSRMatrixNumRownnz(C_tmp_offd) = 0;
}
hypre_CSRMatrixDestroy(AT_diag);
hypre_CSRMatrixDestroy(AT_offd);
/*-----------------------------------------------------------------------
* Add contents of C_ext to C_tmp_diag and C_tmp_offd
* to obtain C_diag and C_offd
*-----------------------------------------------------------------------*/
/* check for new nonzero columns in C_offd generated through C_ext */
first_col_diag_C = first_col_diag_B;
last_col_diag_C = first_col_diag_B + (HYPRE_BigInt)num_cols_diag_B - 1;
C_tmp_diag_i = hypre_CSRMatrixI(C_tmp_diag);
if (C_ext_size || num_cols_offd_B)
{
HYPRE_Int C_ext_num_rows;
num_sends_A = hypre_ParCSRCommPkgNumSends(comm_pkg_A);
send_map_starts_A = hypre_ParCSRCommPkgSendMapStarts(comm_pkg_A);
send_map_elmts_A = hypre_ParCSRCommPkgSendMapElmts(comm_pkg_A);
C_ext_num_rows = send_map_starts_A[num_sends_A];
C_ext_diag_i = hypre_CTAlloc(HYPRE_Int, C_ext_num_rows+1, HYPRE_MEMORY_HOST);
C_ext_offd_i = hypre_CTAlloc(HYPRE_Int, C_ext_num_rows+1, HYPRE_MEMORY_HOST);
temp = hypre_CTAlloc(HYPRE_BigInt, C_ext_size+num_cols_offd_B, HYPRE_MEMORY_HOST);
C_ext_diag_size = 0;
C_ext_offd_size = 0;
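/* split each received row of C_ext by column range: global columns in
[first_col_diag_C, last_col_diag_C] belong to the diag part, all others
to the offd part ('temp' collects them for the column map) */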
for (i = 0; i < C_ext_num_rows; i++)
{
for (j = C_ext_i[i]; j < C_ext_i[i+1]; j++)
{
if (C_ext_j[j] < first_col_diag_C ||
C_ext_j[j] > last_col_diag_C)
{
temp[C_ext_offd_size++] = C_ext_j[j];
}
else
{
C_ext_diag_size++;
}
}
C_ext_diag_i[i+1] = C_ext_diag_size;
C_ext_offd_i[i+1] = C_ext_offd_size;
}
cnt = C_ext_offd_size;
for (i = 0; i < num_cols_offd_B; i++)
{
temp[cnt++] = col_map_offd_B[i];
}
if (cnt)
{
hypre_BigQsort0(temp,0,cnt-1);
value = temp[0];
num_cols_offd_C = 1;
for (i = 1; i < cnt; i++)
{
if (temp[i] > value)
{
value = temp[i];
temp[num_cols_offd_C++] = value;
}
}
}
if (num_cols_offd_C)
{
col_map_offd_C = hypre_CTAlloc(HYPRE_BigInt, num_cols_offd_C, HYPRE_MEMORY_HOST);
}
for (i = 0; i < num_cols_offd_C; i++)
{
col_map_offd_C[i] = temp[i];
}
hypre_TFree(temp, HYPRE_MEMORY_HOST);
if (C_ext_diag_size)
{
C_ext_diag_j = hypre_CTAlloc(HYPRE_Int, C_ext_diag_size, HYPRE_MEMORY_HOST);
C_ext_diag_data = hypre_CTAlloc(HYPRE_Complex, C_ext_diag_size, HYPRE_MEMORY_HOST);
}
if (C_ext_offd_size)
{
C_ext_offd_j = hypre_CTAlloc(HYPRE_Int, C_ext_offd_size, HYPRE_MEMORY_HOST);
C_ext_offd_data = hypre_CTAlloc(HYPRE_Complex, C_ext_offd_size, HYPRE_MEMORY_HOST);
}
C_tmp_diag_j = hypre_CSRMatrixJ(C_tmp_diag);
C_tmp_diag_data = hypre_CSRMatrixData(C_tmp_diag);
C_tmp_offd_i = hypre_CSRMatrixI(C_tmp_offd);
C_tmp_offd_j = hypre_CSRMatrixJ(C_tmp_offd);
C_tmp_offd_data = hypre_CSRMatrixData(C_tmp_offd);
cnt_offd = 0;
cnt_diag = 0;
for (i = 0; i < C_ext_num_rows; i++)
{
for (j = C_ext_i[i]; j < C_ext_i[i+1]; j++)
{
if (C_ext_j[j] < first_col_diag_C ||
C_ext_j[j] > last_col_diag_C)
{
C_ext_offd_j[cnt_offd] = hypre_BigBinarySearch(col_map_offd_C,
C_ext_j[j],
num_cols_offd_C);
C_ext_offd_data[cnt_offd++] = C_ext_data[j];
}
else
{
C_ext_diag_j[cnt_diag] = (HYPRE_Int)(C_ext_j[j] - first_col_diag_C);
C_ext_diag_data[cnt_diag++] = C_ext_data[j];
}
}
}
}
if (C_ext)
{
hypre_CSRMatrixDestroy(C_ext);
C_ext = NULL;
}
if (num_cols_offd_B)
{
map_B_to_C = hypre_CTAlloc(HYPRE_Int, num_cols_offd_B, HYPRE_MEMORY_HOST);
cnt = 0;
for (i = 0; i < num_cols_offd_C; i++)
{
if (col_map_offd_C[i] == col_map_offd_B[cnt])
{
map_B_to_C[cnt++] = i;
if (cnt == num_cols_offd_B) break;
}
}
for (i = 0; i < hypre_CSRMatrixI(C_tmp_offd)[hypre_CSRMatrixNumRows(C_tmp_offd)]; i++)
{
j_indx = C_tmp_offd_j[i];
C_tmp_offd_j[i] = map_B_to_C[j_indx];
}
}
/*-----------------------------------------------------------------------
* Need to compute:
* C_diag = C_tmp_diag + C_ext_diag
* C_offd = C_tmp_offd + C_ext_offd
*
* First generate structure
*-----------------------------------------------------------------------*/
if (C_ext_size || num_cols_offd_B)
{
C_diag_i = hypre_CTAlloc(HYPRE_Int, num_cols_diag_A+1, memory_location_C);
C_offd_i = hypre_CTAlloc(HYPRE_Int, num_cols_diag_A+1, memory_location_C);
C_diag_array = hypre_CTAlloc(HYPRE_Int, max_num_threads, HYPRE_MEMORY_HOST);
C_offd_array = hypre_CTAlloc(HYPRE_Int, max_num_threads, HYPRE_MEMORY_HOST);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel
#endif
{
HYPRE_Int *B_marker = NULL;
HYPRE_Int *B_marker_offd = NULL;
HYPRE_Int ik, jk, j1, j2, jcol;
HYPRE_Int ns, ne, ii, nnz_d, nnz_o;
HYPRE_Int rest, size;
HYPRE_Int num_threads = hypre_NumActiveThreads();
size = num_cols_diag_A/num_threads;
rest = num_cols_diag_A - size*num_threads;
ii = hypre_GetThreadNum();
if (ii < rest)
{
ns = ii*size+ii;
ne = (ii+1)*size+ii+1;
}
else
{
ns = ii*size+rest;
ne = (ii+1)*size+rest;
}
B_marker = hypre_CTAlloc(HYPRE_Int, num_cols_diag_B, HYPRE_MEMORY_HOST);
B_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd_C, HYPRE_MEMORY_HOST);
for (ik = 0; ik < num_cols_diag_B; ik++)
{
B_marker[ik] = -1;
}
for (ik = 0; ik < num_cols_offd_C; ik++)
{
B_marker_offd[ik] = -1;
}
nnz_d = 0;
nnz_o = 0;
for (ik = ns; ik < ne; ik++)
{
for (jk = C_tmp_diag_i[ik]; jk < C_tmp_diag_i[ik+1]; jk++)
{
jcol = C_tmp_diag_j[jk];
B_marker[jcol] = ik;
nnz_d++;
}
for (jk = C_tmp_offd_i[ik]; jk < C_tmp_offd_i[ik+1]; jk++)
{
jcol = C_tmp_offd_j[jk];
B_marker_offd[jcol] = ik;
nnz_o++;
}
for (jk = 0; jk < num_sends_A; jk++)
{
for (j1 = send_map_starts_A[jk]; j1 < send_map_starts_A[jk+1]; j1++)
{
if (send_map_elmts_A[j1] == ik)
{
for (j2 = C_ext_diag_i[j1]; j2 < C_ext_diag_i[j1+1]; j2++)
{
jcol = C_ext_diag_j[j2];
if (B_marker[jcol] < ik)
{
B_marker[jcol] = ik;
nnz_d++;
}
}
for (j2 = C_ext_offd_i[j1]; j2 < C_ext_offd_i[j1+1]; j2++)
{
jcol = C_ext_offd_j[j2];
if (B_marker_offd[jcol] < ik)
{
B_marker_offd[jcol] = ik;
nnz_o++;
}
}
break;
}
}
}
C_diag_array[ii] = nnz_d;
C_offd_array[ii] = nnz_o;
}
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
if (ii == 0)
{
nnz_d = 0;
nnz_o = 0;
for (ik = 0; ik < num_threads-1; ik++)
{
C_diag_array[ik+1] += C_diag_array[ik];
C_offd_array[ik+1] += C_offd_array[ik];
}
nnz_d = C_diag_array[num_threads-1];
nnz_o = C_offd_array[num_threads-1];
C_diag_i[num_cols_diag_A] = nnz_d;
C_offd_i[num_cols_diag_A] = nnz_o;
C_diag = hypre_CSRMatrixCreate(num_cols_diag_A, num_cols_diag_A, nnz_d);
C_offd = hypre_CSRMatrixCreate(num_cols_diag_A, num_cols_offd_C, nnz_o);
hypre_CSRMatrixI(C_diag) = C_diag_i;
hypre_CSRMatrixInitialize_v2(C_diag, 0, memory_location_C);
C_diag_j = hypre_CSRMatrixJ(C_diag);
C_diag_data = hypre_CSRMatrixData(C_diag);
hypre_CSRMatrixI(C_offd) = C_offd_i;
hypre_CSRMatrixInitialize_v2(C_offd, 0, memory_location_C);
C_offd_j = hypre_CSRMatrixJ(C_offd);
C_offd_data = hypre_CSRMatrixData(C_offd);
}
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
/*-----------------------------------------------------------------------
* Need to compute C_diag = C_tmp_diag + C_ext_diag
* and C_offd = C_tmp_offd + C_ext_offd !!!!
* Now fill in values
*-----------------------------------------------------------------------*/
for (ik = 0; ik < num_cols_diag_B; ik++)
{
B_marker[ik] = -1;
}
for (ik = 0; ik < num_cols_offd_C; ik++)
{
B_marker_offd[ik] = -1;
}
/*-----------------------------------------------------------------------
* Populate matrices
*-----------------------------------------------------------------------*/
nnz_d = 0;
nnz_o = 0;
if (ii)
{
nnz_d = C_diag_array[ii-1];
nnz_o = C_offd_array[ii-1];
}
for (ik = ns; ik < ne; ik++)
{
C_diag_i[ik] = nnz_d;
C_offd_i[ik] = nnz_o;
for (jk = C_tmp_diag_i[ik]; jk < C_tmp_diag_i[ik+1]; jk++)
{
jcol = C_tmp_diag_j[jk];
C_diag_j[nnz_d] = jcol;
C_diag_data[nnz_d] = C_tmp_diag_data[jk];
B_marker[jcol] = nnz_d;
nnz_d++;
}
for (jk = C_tmp_offd_i[ik]; jk < C_tmp_offd_i[ik+1]; jk++)
{
jcol = C_tmp_offd_j[jk];
C_offd_j[nnz_o] = jcol;
C_offd_data[nnz_o] = C_tmp_offd_data[jk];
B_marker_offd[jcol] = nnz_o;
nnz_o++;
}
for (jk = 0; jk < num_sends_A; jk++)
{
for (j1 = send_map_starts_A[jk]; j1 < send_map_starts_A[jk+1]; j1++)
{
if (send_map_elmts_A[j1] == ik)
{
for (j2 = C_ext_diag_i[j1]; j2 < C_ext_diag_i[j1+1]; j2++)
{
jcol = C_ext_diag_j[j2];
if (B_marker[jcol] < C_diag_i[ik])
{
C_diag_j[nnz_d] = jcol;
C_diag_data[nnz_d] = C_ext_diag_data[j2];
B_marker[jcol] = nnz_d;
nnz_d++;
}
else
{
C_diag_data[B_marker[jcol]] += C_ext_diag_data[j2];
}
}
for (j2 = C_ext_offd_i[j1]; j2 < C_ext_offd_i[j1+1]; j2++)
{
jcol = C_ext_offd_j[j2];
if (B_marker_offd[jcol] < C_offd_i[ik])
{
C_offd_j[nnz_o] = jcol;
C_offd_data[nnz_o] = C_ext_offd_data[j2];
B_marker_offd[jcol] = nnz_o;
nnz_o++;
}
else
{
C_offd_data[B_marker_offd[jcol]] += C_ext_offd_data[j2];
}
}
break;
}
}
}
}
hypre_TFree(B_marker, HYPRE_MEMORY_HOST);
hypre_TFree(B_marker_offd, HYPRE_MEMORY_HOST);
} /*end parallel region */
hypre_TFree(C_diag_array, HYPRE_MEMORY_HOST);
hypre_TFree(C_offd_array, HYPRE_MEMORY_HOST);
}
/*C = hypre_ParCSRMatrixCreate(comm, ncols_A, ncols_B, col_starts_A,
col_starts_B, num_cols_offd_C, nnz_diag, nnz_offd);
hypre_CSRMatrixDestroy(hypre_ParCSRMatrixDiag(C));
hypre_CSRMatrixDestroy(hypre_ParCSRMatrixOffd(C)); */
/* row_starts[0] is start of local rows. row_starts[1] is start of next
processor's rows */
first_row_index = col_starts_A[0];
local_num_rows = (HYPRE_Int)(col_starts_A[1]-first_row_index );
first_col_diag = col_starts_B[0];
local_num_cols = (HYPRE_Int)(col_starts_B[1]-first_col_diag);
C = hypre_CTAlloc(hypre_ParCSRMatrix, 1, HYPRE_MEMORY_HOST);
hypre_ParCSRMatrixComm(C) = comm;
hypre_ParCSRMatrixGlobalNumRows(C) = ncols_A;
hypre_ParCSRMatrixGlobalNumCols(C) = ncols_B;
hypre_ParCSRMatrixFirstRowIndex(C) = first_row_index;
hypre_ParCSRMatrixFirstColDiag(C) = first_col_diag;
hypre_ParCSRMatrixLastRowIndex(C) = first_row_index + (HYPRE_BigInt)local_num_rows - 1;
hypre_ParCSRMatrixLastColDiag(C) = first_col_diag + (HYPRE_BigInt)local_num_cols - 1;
hypre_ParCSRMatrixColMapOffd(C) = NULL;
hypre_ParCSRMatrixAssumedPartition(C) = NULL;
hypre_ParCSRMatrixRowStarts(C) = col_starts_A;
hypre_ParCSRMatrixColStarts(C) = col_starts_B;
hypre_ParCSRMatrixCommPkg(C) = NULL;
hypre_ParCSRMatrixCommPkgT(C) = NULL;
/* set defaults */
hypre_ParCSRMatrixOwnsData(C) = 1;
hypre_ParCSRMatrixRowindices(C) = NULL;
hypre_ParCSRMatrixRowvalues(C) = NULL;
hypre_ParCSRMatrixGetrowactive(C) = 0;
/* Note that C does not own the partitionings */
hypre_ParCSRMatrixSetRowStartsOwner(C,0);
hypre_ParCSRMatrixSetColStartsOwner(C,0);
if (C_diag)
{
hypre_CSRMatrixSetRownnz(C_diag);
hypre_ParCSRMatrixDiag(C) = C_diag;
}
else
{
hypre_ParCSRMatrixDiag(C) = C_tmp_diag;
}
if (C_offd)
{
hypre_CSRMatrixSetRownnz(C_offd);
hypre_ParCSRMatrixOffd(C) = C_offd;
}
else
{
hypre_ParCSRMatrixOffd(C) = C_tmp_offd;
}
hypre_CSRMatrixMemoryLocation(hypre_ParCSRMatrixDiag(C)) = memory_location_C;
hypre_CSRMatrixMemoryLocation(hypre_ParCSRMatrixOffd(C)) = memory_location_C;
if (num_cols_offd_C)
{
HYPRE_Int jj_count_offd, nnz_offd;
HYPRE_BigInt *new_col_map_offd_C = NULL;
P_marker = hypre_CTAlloc(HYPRE_Int, num_cols_offd_C, HYPRE_MEMORY_HOST);
for (i = 0; i < num_cols_offd_C; i++)
{
P_marker[i] = -1;
}
jj_count_offd = 0;
nnz_offd = C_offd_i[num_cols_diag_A];
for (i = 0; i < nnz_offd; i++)
{
i1 = C_offd_j[i];
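/* P_marker[i1] is still -1 (nonzero) the first time column i1 is seen */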
if (P_marker[i1])
{
P_marker[i1] = 0;
jj_count_offd++;
}
}
if (jj_count_offd < num_cols_offd_C)
{
new_col_map_offd_C = hypre_CTAlloc(HYPRE_BigInt, jj_count_offd, HYPRE_MEMORY_HOST);
jj_count_offd = 0;
for (i = 0; i < num_cols_offd_C; i++)
{
if (!P_marker[i])
{
P_marker[i] = jj_count_offd;
new_col_map_offd_C[jj_count_offd++] = col_map_offd_C[i];
}
}
for (i = 0; i < nnz_offd; i++)
{
i1 = C_offd_j[i];
C_offd_j[i] = P_marker[i1];
}
num_cols_offd_C = jj_count_offd;
hypre_TFree(col_map_offd_C, HYPRE_MEMORY_HOST);
col_map_offd_C = new_col_map_offd_C;
hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(C)) = num_cols_offd_C;
}
hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
}
hypre_ParCSRMatrixColMapOffd(C) = col_map_offd_C;
/*-----------------------------------------------------------------------
* Free various arrays
*-----------------------------------------------------------------------*/
if (C_ext_size || num_cols_offd_B)
{
hypre_TFree(C_ext_diag_i, HYPRE_MEMORY_HOST);
hypre_TFree(C_ext_offd_i, HYPRE_MEMORY_HOST);
}
if (C_ext_diag_size)
{
hypre_TFree(C_ext_diag_j, HYPRE_MEMORY_HOST);
hypre_TFree(C_ext_diag_data, HYPRE_MEMORY_HOST);
}
if (C_ext_offd_size)
{
hypre_TFree(C_ext_offd_j, HYPRE_MEMORY_HOST);
hypre_TFree(C_ext_offd_data, HYPRE_MEMORY_HOST);
}
if (num_cols_offd_B)
{
hypre_TFree(map_B_to_C, HYPRE_MEMORY_HOST);
}
if (C_diag)
{
hypre_CSRMatrixDestroy(C_tmp_diag);
}
if (C_offd)
{
hypre_CSRMatrixDestroy(C_tmp_offd);
}
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
if ( hypre_GetExecPolicy2(memory_location_A, memory_location_B) == HYPRE_EXEC_DEVICE )
{
hypre_CSRMatrixMoveDiagFirstDevice(hypre_ParCSRMatrixDiag(C));
hypre_SyncCudaComputeStream(hypre_handle());
}
#endif
return C;
}
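/* Illustrative usage sketch, not part of hypre: computing C = A^T * B
with hypre_ParTMatmul above. A and B must share the same row
distribution; the helper name is ours. */
static void example_tmatmul_usage( hypre_ParCSRMatrix *A,
hypre_ParCSRMatrix *B )
{
hypre_ParCSRMatrix *C = hypre_ParTMatmul(A, B);
if (C)
{
/* ... use C ... */
hypre_ParCSRMatrixDestroy(C);
}
}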
HYPRE_Int
hypre_ParvecBdiagInvScal( hypre_ParVector *b,
HYPRE_Int blockSize,
hypre_ParVector **bs,
hypre_ParCSRMatrix *A)
{
MPI_Comm comm = hypre_ParVectorComm(b);
HYPRE_Int num_procs, my_id;
hypre_MPI_Comm_rank(comm, &my_id);
hypre_MPI_Comm_size(comm, &num_procs);
HYPRE_Int i, j, s;
HYPRE_BigInt block_start, block_end;
HYPRE_BigInt nrow_global = hypre_ParVectorGlobalSize(b);
HYPRE_BigInt first_row = hypre_ParVectorFirstIndex(b);
HYPRE_BigInt last_row = hypre_ParVectorLastIndex(b);
HYPRE_BigInt end_row = last_row + 1; /* one past-the-last */
HYPRE_BigInt first_row_block = first_row / (HYPRE_BigInt)(blockSize) * (HYPRE_BigInt)blockSize;
HYPRE_BigInt end_row_block = hypre_min( (last_row / (HYPRE_BigInt)blockSize + 1) * (HYPRE_BigInt)blockSize, nrow_global );
hypre_assert(blockSize == A->bdiag_size);
HYPRE_Complex *bdiaginv = A->bdiaginv;
hypre_ParCSRCommPkg *comm_pkg = A->bdiaginv_comm_pkg;
HYPRE_Complex *dense = bdiaginv;
/* local vector of b */
hypre_Vector *b_local = hypre_ParVectorLocalVector(b);
HYPRE_Complex *b_local_data = hypre_VectorData(b_local);
/* number of sends (#procs) */
HYPRE_Int num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
/* number of rows to send */
HYPRE_Int num_rows_send = hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends);
/* number of recvs (#procs) */
HYPRE_Int num_recvs = hypre_ParCSRCommPkgNumRecvs(comm_pkg);
/* number of rows to recv */
HYPRE_Int num_rows_recv = hypre_ParCSRCommPkgRecvVecStart(comm_pkg, num_recvs);
hypre_ParCSRCommHandle *comm_handle;
HYPRE_BigInt *part = hypre_TAlloc(HYPRE_BigInt, 2, HYPRE_MEMORY_HOST);
hypre_TMemcpy(part, hypre_ParVectorPartitioning(b), HYPRE_BigInt, 2,
HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);
hypre_ParVector *bnew = hypre_ParVectorCreate( hypre_ParVectorComm(b),
hypre_ParVectorGlobalSize(b), part );
hypre_ParVectorInitialize(bnew);
hypre_Vector *bnew_local = hypre_ParVectorLocalVector(bnew);
HYPRE_Complex *bnew_local_data = hypre_VectorData(bnew_local);
/* send and recv b */
HYPRE_Complex *send_b = hypre_TAlloc(HYPRE_Complex, num_rows_send, HYPRE_MEMORY_HOST);
HYPRE_Complex *recv_b = hypre_TAlloc(HYPRE_Complex, num_rows_recv, HYPRE_MEMORY_HOST);
for (i = 0; i < num_rows_send; i++)
{
j = hypre_ParCSRCommPkgSendMapElmt(comm_pkg, i);
send_b[i] = b_local_data[j];
}
comm_handle = hypre_ParCSRCommHandleCreate(1, comm_pkg, send_b, recv_b);
/* nothing is overlapped here; Destroy below waits for the exchange to finish */
hypre_ParCSRCommHandleDestroy(comm_handle);
for (block_start = first_row_block; block_start < end_row_block; block_start += blockSize)
{
HYPRE_BigInt big_i;
block_end = hypre_min(block_start + (HYPRE_BigInt)blockSize, nrow_global);
s = (HYPRE_Int)(block_end - block_start);
for (big_i = block_start; big_i < block_end; big_i++)
{
if (big_i < first_row || big_i >= end_row)
{
continue;
}
HYPRE_Int local_i = (HYPRE_Int)(big_i - first_row);
HYPRE_Int block_i = (HYPRE_Int)(big_i - block_start);
bnew_local_data[local_i] = 0.0;
for (j = 0; j < s; j++)
{
HYPRE_BigInt global_rid = block_start + (HYPRE_BigInt)j;
HYPRE_Complex val = dense[block_i + j*blockSize];
if (val == 0.0)
{
continue;
}
if (global_rid >= first_row && global_rid < end_row)
{
HYPRE_Int rid = (HYPRE_Int)(global_rid - first_row);
bnew_local_data[local_i] += val * b_local_data[rid];
}
else
{
HYPRE_Int rid;
if (global_rid < first_row)
{
rid = (HYPRE_Int)(global_rid - first_row_block);
}
else
{
rid = (HYPRE_Int)(first_row - first_row_block + global_rid - end_row);
}
bnew_local_data[local_i] += val * recv_b[rid];
}
}
}
dense += blockSize * blockSize;
}
hypre_TFree(send_b, HYPRE_MEMORY_HOST);
hypre_TFree(recv_b, HYPRE_MEMORY_HOST);
*bs = bnew;
return hypre_error_flag;
}
/**
* @brief Compute As = B^{-1}*A, where B is the block diagonal of A
* @param[in] A : input ParCSR matrix (must be globally square)
* @param[in] blockSize: block size of the block diagonal
* @param[out] As : output matrix B^{-1}*A
* @return hypre_error_flag
* @note the inverted diagonal blocks are saved in A->bdiaginv for later
* use by hypre_ParvecBdiagInvScal
* @warning only N_ROW == N_COL is supported
*/
HYPRE_Int
hypre_ParcsrBdiagInvScal( hypre_ParCSRMatrix *A,
HYPRE_Int blockSize,
hypre_ParCSRMatrix **As)
{
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
HYPRE_Int num_procs, my_id;
hypre_MPI_Comm_rank(comm, &my_id);
hypre_MPI_Comm_size(comm, &num_procs);
HYPRE_Int i, j, k, s;
HYPRE_BigInt block_start, block_end;
/* diag part of A */
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
HYPRE_Real *A_diag_a = hypre_CSRMatrixData(A_diag);
HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
/* off-diag part of A */
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
HYPRE_Real *A_offd_a = hypre_CSRMatrixData(A_offd);
HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd);
HYPRE_BigInt *col_map_offd_A = hypre_ParCSRMatrixColMapOffd(A);
HYPRE_Int nrow_local = hypre_CSRMatrixNumRows(A_diag);
HYPRE_BigInt first_row = hypre_ParCSRMatrixFirstRowIndex(A);
HYPRE_BigInt last_row = hypre_ParCSRMatrixLastRowIndex(A);
HYPRE_BigInt end_row = first_row + (HYPRE_BigInt)nrow_local; /* one past-the-last */
HYPRE_Int ncol_local = hypre_CSRMatrixNumCols(A_diag);
HYPRE_BigInt first_col = hypre_ParCSRMatrixFirstColDiag(A);
/* HYPRE_Int last_col = hypre_ParCSRMatrixLastColDiag(A); */
HYPRE_BigInt end_col = first_col + (HYPRE_BigInt)ncol_local;
HYPRE_BigInt nrow_global = hypre_ParCSRMatrixGlobalNumRows(A);
HYPRE_BigInt ncol_global = hypre_ParCSRMatrixGlobalNumCols(A);
HYPRE_BigInt *row_starts = hypre_ParCSRMatrixRowStarts(A);
void *request;
/* if square globally and locally */
HYPRE_Int square2 = (nrow_global == ncol_global) && (nrow_local == ncol_local) &&
(first_row == first_col);
if (nrow_global != ncol_global)
{
hypre_printf("hypre_ParcsrBdiagInvScal: only support N_ROW == N_COL\n");
return hypre_error_flag;
}
/* in the block-diagonal partition, the row range of the blocks this proc spans */
HYPRE_BigInt first_row_block = first_row / (HYPRE_BigInt)blockSize * (HYPRE_BigInt)blockSize;
HYPRE_BigInt end_row_block = hypre_min( (last_row / (HYPRE_BigInt)blockSize + 1) * (HYPRE_BigInt)blockSize, nrow_global );
HYPRE_Int num_blocks = (HYPRE_Int)(last_row / (HYPRE_BigInt)blockSize + 1 - first_row / (HYPRE_BigInt)blockSize);
//for (i=first_row_block; i < end_row; i+=blockSize) ;
//printf("===[%d %d), [ %d %d ) %d === \n", first_row, end_row, first_row_block, end_row_block, i);
//return 0;
/* number of external rows */
HYPRE_Int num_ext_rows = (HYPRE_Int)(end_row_block - first_row_block - (end_row - first_row));
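/* Worked example (illustrative only): blockSize = 4 and this proc owns rows
 * [6, 10), i.e. first_row = 6, last_row = 9, end_row = 10. Then
 *    first_row_block = (6/4)*4                       = 4
 *    end_row_block   = min((9/4 + 1)*4, nrow_global) = 12
 *    num_blocks      = 9/4 + 1 - 6/4                 = 2
 *    num_ext_rows    = (12 - 4) - (10 - 6)           = 4
 * so rows 4, 5, 10 and 11 must be fetched from neighboring procs. */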
HYPRE_BigInt *ext_indices;
HYPRE_Int A_ext_nnz;
hypre_CSRMatrix *A_ext = NULL;
HYPRE_Complex *A_ext_a = NULL;
HYPRE_Int *A_ext_i = NULL;
HYPRE_BigInt *A_ext_j = NULL;
HYPRE_Real *dense_all = hypre_CTAlloc(HYPRE_Complex, num_blocks*blockSize*blockSize, HYPRE_MEMORY_HOST);
HYPRE_Real *dense = dense_all;
HYPRE_Int *IPIV = hypre_TAlloc(HYPRE_Int, blockSize, HYPRE_MEMORY_HOST);
HYPRE_Complex *dgetri_work = NULL;
HYPRE_Int dgetri_lwork = -1, lapack_info;
HYPRE_Int num_cols_A_offd_new;
HYPRE_BigInt *col_map_offd_A_new;
HYPRE_BigInt big_i;
HYPRE_Int *offd2new = NULL;
HYPRE_Int *marker_diag, *marker_newoffd;
HYPRE_Int nnz_diag = A_diag_i[nrow_local];
HYPRE_Int nnz_offd = A_offd_i[nrow_local];
HYPRE_Int nnz_diag_new = 0, nnz_offd_new = 0;
HYPRE_Int *A_diag_i_new, *A_diag_j_new, *A_offd_i_new, *A_offd_j_new;
HYPRE_Complex *A_diag_a_new, *A_offd_a_new;
/* heuristic */
HYPRE_Int nnz_diag_alloc = 2 * nnz_diag;
HYPRE_Int nnz_offd_alloc = 2 * nnz_offd;
A_diag_i_new = hypre_CTAlloc(HYPRE_Int, nrow_local + 1, HYPRE_MEMORY_HOST);
A_diag_j_new = hypre_CTAlloc(HYPRE_Int, nnz_diag_alloc, HYPRE_MEMORY_HOST);
A_diag_a_new = hypre_CTAlloc(HYPRE_Complex, nnz_diag_alloc, HYPRE_MEMORY_HOST);
A_offd_i_new = hypre_CTAlloc(HYPRE_Int, nrow_local + 1, HYPRE_MEMORY_HOST);
A_offd_j_new = hypre_CTAlloc(HYPRE_Int, nnz_offd_alloc, HYPRE_MEMORY_HOST);
A_offd_a_new = hypre_CTAlloc(HYPRE_Complex, nnz_offd_alloc, HYPRE_MEMORY_HOST);
hypre_ParCSRMatrix *Anew;
hypre_CSRMatrix *Anew_diag;
hypre_CSRMatrix *Anew_offd;
HYPRE_BigInt *row_starts_new, *col_starts_new;
HYPRE_Real eps = 2.2e-16;
/* Start with extracting the external rows */
HYPRE_BigInt *ext_offd;
ext_indices = hypre_CTAlloc(HYPRE_BigInt, num_ext_rows, HYPRE_MEMORY_HOST);
j = 0;
for (big_i = first_row_block; big_i < first_row; big_i++)
{
ext_indices[j++] = big_i;
}
for (big_i = end_row; big_i < end_row_block; big_i++)
{
ext_indices[j++] = big_i;
}
hypre_assert(j == num_ext_rows);
/* create CommPkg for external rows */
hypre_ParCSRFindExtendCommPkg(comm, nrow_global, first_row, nrow_local, row_starts,
hypre_ParCSRMatrixAssumedPartition(A),
num_ext_rows, ext_indices, &A->bdiaginv_comm_pkg);
hypre_ParcsrGetExternalRowsInit(A, num_ext_rows, ext_indices, A->bdiaginv_comm_pkg, 1, &request);
A_ext = hypre_ParcsrGetExternalRowsWait(request);
hypre_TFree(ext_indices, HYPRE_MEMORY_HOST);
A_ext_i = hypre_CSRMatrixI(A_ext);
A_ext_j = hypre_CSRMatrixBigJ(A_ext);
A_ext_a = hypre_CSRMatrixData(A_ext);
A_ext_nnz = A_ext_i[num_ext_rows];
ext_offd = hypre_CTAlloc(HYPRE_BigInt, A_ext_nnz, HYPRE_MEMORY_HOST);
/* find the offd indices in A_ext */
for (i = 0, j = 0; i < A_ext_nnz; i++)
{
/* global index */
HYPRE_BigInt cid = A_ext_j[i];
/* keep the offd indices */
if (cid < first_col || cid >= end_col)
{
ext_offd[j++] = cid;
}
}
/* remove duplicates after sorting (TODO better ways?) */
hypre_BigQsort0(ext_offd, 0, j-1);
for (i = 0, k = 0; i < j; i++)
{
if (i == 0 || ext_offd[i] != ext_offd[i-1])
{
ext_offd[k++] = ext_offd[i];
}
}
/* union these `k' new indices into col_map_offd_A */
col_map_offd_A_new = hypre_CTAlloc(HYPRE_BigInt, num_cols_A_offd + k, HYPRE_MEMORY_HOST);
if (k)
{
/* map offd to offd_new */
offd2new = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
}
hypre_union2(num_cols_A_offd, col_map_offd_A, k, ext_offd,
&num_cols_A_offd_new, col_map_offd_A_new, offd2new, NULL);
hypre_TFree(ext_offd, HYPRE_MEMORY_HOST);
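/* Note (added for clarity): hypre_union2 merges the two sorted lists into
 * col_map_offd_A_new and records, in offd2new, each old offd column's
 * position in the merged list. Tiny example (illustrative):
 * {3,7} U {5,7,9} = {3,5,7,9} with offd2new = {0,2}. */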
/*
* adjust column indices in A_ext
*/
for (i = 0; i < A_ext_nnz; i++)
{
HYPRE_BigInt cid = A_ext_j[i];
if (cid < first_col || cid >= end_col)
{
j = hypre_BigBinarySearch(col_map_offd_A_new, cid, num_cols_A_offd_new);
/* searching must succeed */
hypre_assert(j >= 0 && j < num_cols_A_offd_new);
/* trick: save ncol_local + j back */
A_ext_j[i] = ncol_local + j;
}
else
{
/* save local index: [0, ncol_local-1] */
A_ext_j[i] = cid - first_col;
}
}
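/* A_ext_j now holds compressed indices: values in [0, ncol_local) are local
 * diag columns; values >= ncol_local are ncol_local plus the position in
 * col_map_offd_A_new. E.g. (illustrative) with ncol_local = 100, diag
 * column 42 stays 42, while the offd column at position 5 of
 * col_map_offd_A_new is stored as 105. */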
/* marker for diag */
marker_diag = hypre_TAlloc(HYPRE_Int, ncol_local, HYPRE_MEMORY_HOST);
for (i = 0; i < ncol_local; i++)
{
marker_diag[i] = -1;
}
/* marker for newoffd */
marker_newoffd = hypre_TAlloc(HYPRE_Int, num_cols_A_offd_new, HYPRE_MEMORY_HOST);
for (i = 0; i < num_cols_A_offd_new; i++)
{
marker_newoffd[i] = -1;
}
/* outer most loop for blocks */
for (block_start = first_row_block; block_start < end_row_block; block_start += (HYPRE_BigInt)blockSize)
{
HYPRE_BigInt big_i;
block_end = hypre_min(block_start + (HYPRE_BigInt)blockSize, nrow_global);
s = (HYPRE_Int)(block_end - block_start);
/* 1. fill the dense block diag matrix */
for (big_i = block_start; big_i < block_end; big_i++)
{
/* row index in this block */
HYPRE_Int block_i = (HYPRE_Int)(big_i - block_start);
/* row index i: it can be local or external */
if (big_i >= first_row && big_i < end_row)
{
/* is a local row */
j = (HYPRE_Int)(big_i - first_row);
for (k = A_diag_i[j]; k < A_diag_i[j+1]; k++)
{
HYPRE_BigInt cid = (HYPRE_BigInt)A_diag_j[k] + first_col;
if (cid >= block_start && cid < block_end)
{
dense[block_i + (HYPRE_Int)(cid-block_start)*blockSize] = A_diag_a[k];
}
}
if (num_cols_A_offd)
{
for (k = A_offd_i[j]; k < A_offd_i[j+1]; k++)
{
HYPRE_BigInt cid = col_map_offd_A[A_offd_j[k]];
if (cid >= block_start && cid < block_end)
{
dense[block_i + (HYPRE_Int)(cid-block_start)*blockSize] = A_offd_a[k];
}
}
}
}
else
{
/* is an external row */
if (big_i < first_row)
{
j = (HYPRE_Int)(big_i - first_row_block);
}
else
{
j = (HYPRE_Int)(first_row - first_row_block + big_i - end_row);
}
for (k = A_ext_i[j]; k < A_ext_i[j+1]; k++)
{
HYPRE_BigInt cid = A_ext_j[k];
/* recover the global index */
cid = cid < (HYPRE_BigInt)ncol_local ? cid + first_col : col_map_offd_A_new[cid-ncol_local];
if (cid >= block_start && cid < block_end)
{
dense[block_i + (HYPRE_Int)(cid-block_start)*blockSize] = A_ext_a[k];
}
}
}
}
/* 2. invert the dense matrix */
hypre_dgetrf(&s, &s, dense, &blockSize, IPIV, &lapack_info);
hypre_assert(lapack_info == 0);
if (lapack_info == 0)
{
HYPRE_Int query = -1;
HYPRE_Real lwork_opt;
/* query the optimal size of work */
hypre_dgetri(&s, dense, &blockSize, IPIV, &lwork_opt, &query, &lapack_info);
hypre_assert(lapack_info == 0);
if (lwork_opt > dgetri_lwork)
{
dgetri_lwork = (HYPRE_Int) lwork_opt;
dgetri_work = hypre_TReAlloc(dgetri_work, HYPRE_Complex, dgetri_lwork, HYPRE_MEMORY_HOST);
}
hypre_dgetri(&s, dense, &blockSize, IPIV, dgetri_work, &dgetri_lwork, &lapack_info);
hypre_assert(lapack_info == 0);
}
/* filter out numerical *zeros*: drop entries whose magnitude is below
 * eps * ||block||_F, since they are artifacts of the inversion
 * (eps ~ double-precision machine epsilon) */
HYPRE_Real Fnorm = 0.0;
for (i = 0; i < s; i++)
{
for (j = 0; j < s; j++)
{
HYPRE_Complex t = dense[j+i*blockSize];
Fnorm += t * t;
}
}
Fnorm = sqrt(Fnorm);
for (i = 0; i < s; i++)
{
for (j = 0; j < s; j++)
{
if ( hypre_abs(dense[j+i*blockSize]) < eps * Fnorm )
{
dense[j+i*blockSize] = 0.0;
}
}
}
/* 3. premultiplication: one-pass dynamic allocation */
for (big_i = block_start; big_i < block_end; big_i++)
{
/* starting points of this row in j */
HYPRE_Int diag_i_start = nnz_diag_new;
HYPRE_Int offd_i_start = nnz_offd_new;
/* compute a new row with global index 'i' and local index 'local_i' */
HYPRE_Int local_i = (HYPRE_Int)(big_i - first_row);
/* row index in this block */
HYPRE_Int block_i = (HYPRE_Int)(big_i - block_start);
if (big_i < first_row || big_i >= end_row)
{
continue;
}
/* if square^2: reserve the first space in diag part to the diag entry */
if (square2)
{
marker_diag[local_i] = nnz_diag_new;
if (nnz_diag_new == nnz_diag_alloc)
{
nnz_diag_alloc = nnz_diag_alloc * 2 + 1;
A_diag_j_new = hypre_TReAlloc(A_diag_j_new, HYPRE_Int, nnz_diag_alloc, HYPRE_MEMORY_HOST);
A_diag_a_new = hypre_TReAlloc(A_diag_a_new, HYPRE_Complex, nnz_diag_alloc, HYPRE_MEMORY_HOST);
}
A_diag_j_new[nnz_diag_new] = local_i;
A_diag_a_new[nnz_diag_new] = 0.0;
nnz_diag_new ++;
}
/* combine s rows */
for (j = 0; j < s; j++)
{
/* row to combine: global row id */
HYPRE_BigInt global_rid = block_start + (HYPRE_BigInt)j;
/* the multiplier */
HYPRE_Complex val = dense[block_i + j*blockSize];
if (val == 0.0)
{
continue;
}
if (global_rid >= first_row && global_rid < end_row)
{
/* this row is local */
HYPRE_Int rid = (HYPRE_Int)(global_rid - first_row);
HYPRE_Int ii;
for (ii = A_diag_i[rid]; ii < A_diag_i[rid+1]; ii++)
{
HYPRE_Int col = A_diag_j[ii];
HYPRE_Complex vv = A_diag_a[ii];
if (marker_diag[col] < diag_i_start)
{
/* this col has not been seen before, create new entry */
marker_diag[col] = nnz_diag_new;
if (nnz_diag_new == nnz_diag_alloc)
{
nnz_diag_alloc = nnz_diag_alloc * 2 + 1;
A_diag_j_new = hypre_TReAlloc(A_diag_j_new, HYPRE_Int, nnz_diag_alloc, HYPRE_MEMORY_HOST);
A_diag_a_new = hypre_TReAlloc(A_diag_a_new, HYPRE_Complex, nnz_diag_alloc, HYPRE_MEMORY_HOST);
}
A_diag_j_new[nnz_diag_new] = col;
A_diag_a_new[nnz_diag_new] = val * vv;
nnz_diag_new ++;
}
else
{
/* existing entry, update */
HYPRE_Int p = marker_diag[col];
hypre_assert(A_diag_j_new[p] == col);
A_diag_a_new[p] += val * vv;
}
}
for (ii = A_offd_i[rid]; ii < A_offd_i[rid+1]; ii++)
{
HYPRE_Int col = A_offd_j[ii];
/* use the mapper to map to new offd */
HYPRE_Int col_new = offd2new ? offd2new[col] : col;
HYPRE_Complex vv = A_offd_a[ii];
if (marker_newoffd[col_new] < offd_i_start)
{
/* this col has not been seen before, create new entry */
marker_newoffd[col_new] = nnz_offd_new;
if (nnz_offd_new == nnz_offd_alloc)
{
nnz_offd_alloc = nnz_offd_alloc * 2 + 1;
A_offd_j_new = hypre_TReAlloc(A_offd_j_new, HYPRE_Int, nnz_offd_alloc, HYPRE_MEMORY_HOST);
A_offd_a_new = hypre_TReAlloc(A_offd_a_new, HYPRE_Complex, nnz_offd_alloc, HYPRE_MEMORY_HOST);
}
A_offd_j_new[nnz_offd_new] = col_new;
A_offd_a_new[nnz_offd_new] = val * vv;
nnz_offd_new ++;
}
else
{
/* existing entry, update */
HYPRE_Int p = marker_newoffd[col_new];
hypre_assert(A_offd_j_new[p] == col_new);
A_offd_a_new[p] += val * vv;
}
}
}
else
{
/* this is an external row: go to A_ext */
HYPRE_Int rid, ii;
if (global_rid < first_row)
{
rid = (HYPRE_Int)(global_rid - first_row_block);
}
else
{
rid = (HYPRE_Int)(first_row - first_row_block + global_rid - end_row);
}
for (ii = A_ext_i[rid]; ii < A_ext_i[rid+1]; ii++)
{
HYPRE_Int col = (HYPRE_Int)A_ext_j[ii];
HYPRE_Complex vv = A_ext_a[ii];
if (col < ncol_local)
{
/* in diag part */
if (marker_diag[col] < diag_i_start)
{
/* this col has not been seen before, create new entry */
marker_diag[col] = nnz_diag_new;
if (nnz_diag_new == nnz_diag_alloc)
{
nnz_diag_alloc = nnz_diag_alloc * 2 + 1;
A_diag_j_new = hypre_TReAlloc(A_diag_j_new, HYPRE_Int, nnz_diag_alloc, HYPRE_MEMORY_HOST);
A_diag_a_new = hypre_TReAlloc(A_diag_a_new, HYPRE_Complex, nnz_diag_alloc, HYPRE_MEMORY_HOST);
}
A_diag_j_new[nnz_diag_new] = col;
A_diag_a_new[nnz_diag_new] = val * vv;
nnz_diag_new ++;
}
else
{
/* existing entry, update */
HYPRE_Int p = marker_diag[col];
hypre_assert(A_diag_j_new[p] == col);
A_diag_a_new[p] += val * vv;
}
}
else
{
/* in offd part */
col -= ncol_local;
if (marker_newoffd[col] < offd_i_start)
{
/* this col has not been seen before, create new entry */
marker_newoffd[col] = nnz_offd_new;
if (nnz_offd_new == nnz_offd_alloc)
{
nnz_offd_alloc = nnz_offd_alloc * 2 + 1;
A_offd_j_new = hypre_TReAlloc(A_offd_j_new, HYPRE_Int, nnz_offd_alloc, HYPRE_MEMORY_HOST);
A_offd_a_new = hypre_TReAlloc(A_offd_a_new, HYPRE_Complex, nnz_offd_alloc, HYPRE_MEMORY_HOST);
}
A_offd_j_new[nnz_offd_new] = col;
A_offd_a_new[nnz_offd_new] = val * vv;
nnz_offd_new ++;
}
else
{
/* existing entry, update */
HYPRE_Int p = marker_newoffd[col];
hypre_assert(A_offd_j_new[p] == col);
A_offd_a_new[p] += val * vv;
}
}
}
}
}
/* done for row local_i */
A_diag_i_new[local_i + 1] = nnz_diag_new;
A_offd_i_new[local_i + 1] = nnz_offd_new;
} /* for i, each row */
dense += blockSize * blockSize;
} /* for each block */
/* done with all rows */
/* resize properly */
A_diag_j_new = hypre_TReAlloc(A_diag_j_new, HYPRE_Int, nnz_diag_new, HYPRE_MEMORY_HOST);
A_diag_a_new = hypre_TReAlloc(A_diag_a_new, HYPRE_Complex, nnz_diag_new, HYPRE_MEMORY_HOST);
A_offd_j_new = hypre_TReAlloc(A_offd_j_new, HYPRE_Int, nnz_offd_new, HYPRE_MEMORY_HOST);
A_offd_a_new = hypre_TReAlloc(A_offd_a_new, HYPRE_Complex, nnz_offd_new, HYPRE_MEMORY_HOST);
/* readjust col_map_offd_new */
for (i = 0; i < num_cols_A_offd_new; i++)
{
marker_newoffd[i] = -1;
}
for (i = 0; i < nnz_offd_new; i++)
{
j = A_offd_j_new[i];
if (marker_newoffd[j] == -1)
{
marker_newoffd[j] = 1;
}
}
for (i = 0, j = 0; i < num_cols_A_offd_new; i++)
{
if (marker_newoffd[i] == 1)
{
col_map_offd_A_new[j] = col_map_offd_A_new[i];
marker_newoffd[i] = j++;
}
}
num_cols_A_offd_new = j;
for (i = 0; i < nnz_offd_new; i++)
{
j = marker_newoffd[A_offd_j_new[i]];
hypre_assert(j >= 0 && j < num_cols_A_offd_new);
A_offd_j_new[i] = j;
}
row_starts_new = hypre_CTAlloc(HYPRE_BigInt, 2, HYPRE_MEMORY_HOST);
col_starts_new = hypre_CTAlloc(HYPRE_BigInt, 2, HYPRE_MEMORY_HOST);
hypre_TMemcpy(row_starts_new, hypre_ParCSRMatrixRowStarts(A), HYPRE_BigInt, 2,
HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);
hypre_TMemcpy(col_starts_new, hypre_ParCSRMatrixColStarts(A), HYPRE_BigInt, 2,
HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);
/* Now, we should have everything of Parcsr matrix As */
Anew = hypre_ParCSRMatrixCreate(comm,
nrow_global,
ncol_global,
row_starts_new,
col_starts_new,
num_cols_A_offd_new,
nnz_diag_new,
nnz_offd_new);
Anew_diag = hypre_ParCSRMatrixDiag(Anew);
hypre_CSRMatrixData(Anew_diag) = A_diag_a_new;
hypre_CSRMatrixI(Anew_diag) = A_diag_i_new;
hypre_CSRMatrixJ(Anew_diag) = A_diag_j_new;
Anew_offd = hypre_ParCSRMatrixOffd(Anew);
hypre_CSRMatrixData(Anew_offd) = A_offd_a_new;
hypre_CSRMatrixI(Anew_offd) = A_offd_i_new;
hypre_CSRMatrixJ(Anew_offd) = A_offd_j_new;
hypre_ParCSRMatrixColMapOffd(Anew) = col_map_offd_A_new;
hypre_ParCSRMatrixSetNumNonzeros(Anew);
hypre_ParCSRMatrixDNumNonzeros(Anew) = (HYPRE_Real) hypre_ParCSRMatrixNumNonzeros(Anew);
//printf("nnz_diag %d --> %d, nnz_offd %d --> %d\n", nnz_diag, nnz_diag_new, nnz_offd, nnz_offd_new);
/* create CommPkg of Anew */
hypre_MatvecCommPkgCreate(Anew);
*As = Anew;
/*
if (bdiaginv)
{
*bdiaginv = dense_all;
}
else
{
hypre_TFree(dense_all, HYPRE_MEMORY_HOST);
}
*/
/* save diagonal blocks in A */
A->bdiag_size = blockSize;
A->bdiaginv = dense_all;
/* free workspace */
hypre_TFree(IPIV, HYPRE_MEMORY_HOST);
hypre_TFree(dgetri_work, HYPRE_MEMORY_HOST);
hypre_TFree(marker_diag, HYPRE_MEMORY_HOST);
hypre_TFree(marker_newoffd, HYPRE_MEMORY_HOST);
hypre_TFree(offd2new, HYPRE_MEMORY_HOST);
hypre_CSRMatrixDestroy(A_ext);
return hypre_error_flag;
}
HYPRE_Int
hypre_ParcsrGetExternalRowsInit( hypre_ParCSRMatrix *A,
HYPRE_Int indices_len,
HYPRE_BigInt *indices,
hypre_ParCSRCommPkg *comm_pkg,
HYPRE_Int want_data,
void **request_ptr)
{
HYPRE_Int i, j, k;
HYPRE_Int num_sends, num_rows_send, num_nnz_send, *send_i,
num_recvs, num_rows_recv, num_nnz_recv, *recv_i,
*send_jstarts, *recv_jstarts, *send_i_offset;
HYPRE_BigInt *send_j, *recv_j;
HYPRE_Complex *send_a = NULL, *recv_a = NULL;
hypre_ParCSRCommPkg *comm_pkg_j;
hypre_ParCSRCommHandle *comm_handle, *comm_handle_j, *comm_handle_a;
/* HYPRE_Int global_num_rows = hypre_ParCSRMatrixGlobalNumRows(A); */
/* diag part of A */
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
HYPRE_Real *A_diag_a = hypre_CSRMatrixData(A_diag);
HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
/* HYPRE_Int local_num_rows = hypre_CSRMatrixNumRows(A_diag); */
/* off-diag part of A */
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
HYPRE_Real *A_offd_a = hypre_CSRMatrixData(A_offd);
HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
/* HYPRE_BigInt *row_starts = hypre_ParCSRMatrixRowStarts(A); */
/* HYPRE_BigInt first_row = hypre_ParCSRMatrixFirstRowIndex(A); */
HYPRE_BigInt first_col = hypre_ParCSRMatrixFirstColDiag(A);
HYPRE_BigInt *col_map_offd_A = hypre_ParCSRMatrixColMapOffd(A);
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
HYPRE_Int num_procs;
HYPRE_Int my_id;
void **vrequest;
hypre_CSRMatrix *A_ext;
hypre_MPI_Comm_size(comm, &num_procs);
hypre_MPI_Comm_rank(comm, &my_id);
/* number of sends (#procs) */
num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
/* number of rows to send */
num_rows_send = hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends);
/* number of recvs (#procs) */
num_recvs = hypre_ParCSRCommPkgNumRecvs(comm_pkg);
/* number of rows to recv */
num_rows_recv = hypre_ParCSRCommPkgRecvVecStart(comm_pkg, num_recvs);
/* must be true if indices contains proper offd indices */
hypre_assert(indices_len == num_rows_recv);
/* send_i/recv_i:
* the arrays to send and recv: we first send and recv the row lengths */
send_i = hypre_TAlloc(HYPRE_Int, num_rows_send, HYPRE_MEMORY_HOST);
recv_i = hypre_CTAlloc(HYPRE_Int, num_rows_recv + 1, HYPRE_MEMORY_HOST);
/* fill the send array with row lengths */
for (i = 0, num_nnz_send = 0; i < num_rows_send; i++)
{
/* j: row index to send */
j = hypre_ParCSRCommPkgSendMapElmt(comm_pkg, i);
send_i[i] = A_diag_i[j+1] - A_diag_i[j] + A_offd_i[j+1] - A_offd_i[j];
num_nnz_send += send_i[i];
}
/* send this array out: note the shift in recv_i by one (async) */
comm_handle = hypre_ParCSRCommHandleCreate(11, comm_pkg, send_i, recv_i+1);
/* prepare the data to send out; this overlaps with the communication above */
send_j = hypre_TAlloc(HYPRE_BigInt, num_nnz_send, HYPRE_MEMORY_HOST);
if (want_data)
{
send_a = hypre_TAlloc(HYPRE_Complex, num_nnz_send, HYPRE_MEMORY_HOST);
}
send_i_offset = hypre_TAlloc(HYPRE_Int, num_rows_send + 1, HYPRE_MEMORY_HOST);
send_i_offset[0] = 0;
hypre_TMemcpy(send_i_offset + 1, send_i, HYPRE_Int, num_rows_send,
HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);
/* prefix sum. TODO: OMP parallelization */
for (i = 1; i <= num_rows_send; i++)
{
send_i_offset[i] += send_i_offset[i-1];
}
hypre_assert(send_i_offset[num_rows_send] == num_nnz_send);
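/* e.g. (illustrative): send_i = {2, 0, 3}  ->  send_i_offset = {0, 2, 2, 5} */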
/* pointers to each proc in send_j */
send_jstarts = hypre_TAlloc(HYPRE_Int, num_sends + 1, HYPRE_MEMORY_HOST);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i <= num_sends; i++)
{
send_jstarts[i] = send_i_offset[hypre_ParCSRCommPkgSendMapStart(comm_pkg, i)];
}
hypre_assert(send_jstarts[num_sends] == num_nnz_send);
/* fill the CSR matrix: j and a */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for HYPRE_SMP_SCHEDULE private(i,j,k)
#endif
for (i = 0; i < num_rows_send; i++)
{
HYPRE_Int i1 = send_i_offset[i];
j = hypre_ParCSRCommPkgSendMapElmt(comm_pkg, i);
/* open row j and fill ja and a to send */
for (k = A_diag_i[j]; k < A_diag_i[j+1]; k++)
{
send_j[i1] = first_col + A_diag_j[k];
if (want_data)
{
send_a[i1] = A_diag_a[k];
}
i1++;
}
if (num_procs > 1)
{
for (k = A_offd_i[j]; k < A_offd_i[j+1]; k++)
{
send_j[i1] = col_map_offd_A[A_offd_j[k]];
if (want_data)
{
send_a[i1] = A_offd_a[k];
}
i1++;
}
}
hypre_assert(send_i_offset[i+1] == i1);
}
/* finish the above communication: send_i/recv_i */
hypre_ParCSRCommHandleDestroy(comm_handle);
/* adjust recv_i to ptrs */
for (i = 1; i <= num_rows_recv; i++)
{
recv_i[i] += recv_i[i-1];
}
num_nnz_recv = recv_i[num_rows_recv];
recv_j = hypre_CTAlloc(HYPRE_BigInt, num_nnz_recv, HYPRE_MEMORY_HOST);
if (want_data)
{
recv_a = hypre_CTAlloc(HYPRE_Complex, num_nnz_recv, HYPRE_MEMORY_HOST);
}
recv_jstarts = hypre_CTAlloc(HYPRE_Int, num_recvs + 1, HYPRE_MEMORY_HOST);
for (i = 1; i <= num_recvs; i++)
{
j = hypre_ParCSRCommPkgRecvVecStart(comm_pkg, i);
recv_jstarts[i] = recv_i[j];
}
/* ready to send and recv: create a communication package for data */
comm_pkg_j = hypre_CTAlloc(hypre_ParCSRCommPkg, 1, HYPRE_MEMORY_HOST);
hypre_ParCSRCommPkgComm (comm_pkg_j) = comm;
hypre_ParCSRCommPkgNumSends (comm_pkg_j) = num_sends;
hypre_ParCSRCommPkgSendProcs (comm_pkg_j) = hypre_ParCSRCommPkgSendProcs(comm_pkg);
hypre_ParCSRCommPkgSendMapStarts(comm_pkg_j) = send_jstarts;
hypre_ParCSRCommPkgNumRecvs (comm_pkg_j) = num_recvs;
hypre_ParCSRCommPkgRecvProcs (comm_pkg_j) = hypre_ParCSRCommPkgRecvProcs(comm_pkg);
hypre_ParCSRCommPkgRecvVecStarts(comm_pkg_j) = recv_jstarts;
/* init communication */
/* ja */
comm_handle_j = hypre_ParCSRCommHandleCreate(21, comm_pkg_j, send_j, recv_j);
if (want_data)
{
/* a */
comm_handle_a = hypre_ParCSRCommHandleCreate(1, comm_pkg_j, send_a, recv_a);
}
else
{
comm_handle_a = NULL;
}
/* create A_ext */
A_ext = hypre_CSRMatrixCreate(num_rows_recv, hypre_ParCSRMatrixGlobalNumCols(A), num_nnz_recv);
hypre_CSRMatrixMemoryLocation(A_ext) = HYPRE_MEMORY_HOST;
hypre_CSRMatrixI (A_ext) = recv_i;
hypre_CSRMatrixBigJ(A_ext) = recv_j;
hypre_CSRMatrixData(A_ext) = recv_a;
/* output */
vrequest = hypre_TAlloc(void *, 4, HYPRE_MEMORY_HOST);
vrequest[0] = (void *) comm_handle_j;
vrequest[1] = (void *) comm_handle_a;
vrequest[2] = (void *) A_ext;
vrequest[3] = (void *) comm_pkg_j;
*request_ptr = (void *) vrequest;
/* free */
hypre_TFree(send_i, HYPRE_MEMORY_HOST);
hypre_TFree(send_i_offset, HYPRE_MEMORY_HOST);
return hypre_error_flag;
}
hypre_CSRMatrix*
hypre_ParcsrGetExternalRowsWait(void *vrequest)
{
void **request = (void **) vrequest;
hypre_ParCSRCommHandle *comm_handle_j = (hypre_ParCSRCommHandle *) request[0];
hypre_ParCSRCommHandle *comm_handle_a = (hypre_ParCSRCommHandle *) request[1];
hypre_CSRMatrix *A_ext = (hypre_CSRMatrix *) request[2];
hypre_ParCSRCommPkg *comm_pkg_j = (hypre_ParCSRCommPkg *) request[3];
HYPRE_BigInt *send_j = (HYPRE_BigInt *) hypre_ParCSRCommHandleSendData(comm_handle_j);
if (comm_handle_a)
{
HYPRE_Complex *send_a = (HYPRE_Complex *) hypre_ParCSRCommHandleSendData(comm_handle_a);
hypre_ParCSRCommHandleDestroy(comm_handle_a);
hypre_TFree(send_a, HYPRE_MEMORY_HOST);
}
hypre_ParCSRCommHandleDestroy(comm_handle_j);
hypre_TFree(send_j, HYPRE_MEMORY_HOST);
hypre_TFree(hypre_ParCSRCommPkgSendMapStarts(comm_pkg_j), HYPRE_MEMORY_HOST);
hypre_TFree(hypre_ParCSRCommPkgRecvVecStarts(comm_pkg_j), HYPRE_MEMORY_HOST);
hypre_TFree(comm_pkg_j, HYPRE_MEMORY_HOST);
hypre_TFree(request, HYPRE_MEMORY_HOST);
return A_ext;
}
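/* Split-phase usage sketch (illustrative; `A`, `n_ext`, `ext_indices` and
 * `comm_pkg` are assumed to exist):
 *
 *    void *req;
 *    hypre_ParcsrGetExternalRowsInit(A, n_ext, ext_indices, comm_pkg, 1, &req);
 *    ... overlap independent local work here ...
 *    hypre_CSRMatrix *A_ext = hypre_ParcsrGetExternalRowsWait(req);
 *    ... use A_ext ...
 *    hypre_CSRMatrixDestroy(A_ext);
 */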
/*--------------------------------------------------------------------------
* hypre_ParCSRMatrixAdd: performs C = alpha*A + beta*B
*
* A and B are assumed to have the same row and column partitionings
*--------------------------------------------------------------------------*/
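/* Usage sketch (illustrative): form C = A - 2B for conforming A and B:
 *
 *    hypre_ParCSRMatrix *C;
 *    hypre_ParCSRMatrixAdd(1.0, A, -2.0, B, &C);
 *    ...
 *    hypre_ParCSRMatrixDestroy(C);
 */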
HYPRE_Int
hypre_ParCSRMatrixAdd( HYPRE_Complex alpha,
hypre_ParCSRMatrix *A,
HYPRE_Complex beta,
hypre_ParCSRMatrix *B,
hypre_ParCSRMatrix **C_ptr )
{
/* ParCSRMatrix data */
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
HYPRE_BigInt num_rows_A = hypre_ParCSRMatrixGlobalNumRows(A);
HYPRE_BigInt num_cols_A = hypre_ParCSRMatrixGlobalNumCols(A);
HYPRE_BigInt num_rows_B = hypre_ParCSRMatrixGlobalNumRows(B);
HYPRE_BigInt num_cols_B = hypre_ParCSRMatrixGlobalNumCols(B);
/* diag part of A */
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
HYPRE_Int *rownnz_diag_A = hypre_CSRMatrixRownnz(A_diag);
HYPRE_Int num_rownnz_diag_A = hypre_CSRMatrixNumRownnz(A_diag);
HYPRE_Int num_rows_diag_A = hypre_CSRMatrixNumRows(A_diag);
HYPRE_Int num_cols_diag_A = hypre_CSRMatrixNumCols(A_diag);
/* off-diag part of A */
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
HYPRE_Int *rownnz_offd_A = hypre_CSRMatrixRownnz(A_offd);
HYPRE_Int num_rownnz_offd_A = hypre_CSRMatrixNumRownnz(A_offd);
HYPRE_Int num_rows_offd_A = hypre_CSRMatrixNumRows(A_offd);
HYPRE_Int num_cols_offd_A = hypre_CSRMatrixNumCols(A_offd);
HYPRE_BigInt *col_map_offd_A = hypre_ParCSRMatrixColMapOffd(A);
HYPRE_Int *A2C_offd;
/* diag part of B */
hypre_CSRMatrix *B_diag = hypre_ParCSRMatrixDiag(B);
HYPRE_Int *rownnz_diag_B = hypre_CSRMatrixRownnz(B_diag);
HYPRE_Int num_rownnz_diag_B = hypre_CSRMatrixNumRownnz(B_diag);
HYPRE_Int num_rows_diag_B = hypre_CSRMatrixNumRows(B_diag);
HYPRE_Int num_cols_diag_B = hypre_CSRMatrixNumCols(B_diag);
/* off-diag part of B */
hypre_CSRMatrix *B_offd = hypre_ParCSRMatrixOffd(B);
HYPRE_Int *rownnz_offd_B = hypre_CSRMatrixRownnz(B_offd);
HYPRE_Int num_rownnz_offd_B = hypre_CSRMatrixNumRownnz(B_offd);
HYPRE_Int num_rows_offd_B = hypre_CSRMatrixNumRows(B_offd);
HYPRE_Int num_cols_offd_B = hypre_CSRMatrixNumCols(B_offd);
HYPRE_BigInt *col_map_offd_B = hypre_ParCSRMatrixColMapOffd(B);
HYPRE_Int *B2C_offd;
/* C data */
hypre_ParCSRMatrix *C;
HYPRE_BigInt *row_starts_C;
HYPRE_BigInt *col_starts_C;
hypre_CSRMatrix *C_diag;
hypre_CSRMatrix *C_offd;
HYPRE_BigInt *col_map_offd_C;
HYPRE_Int *C_diag_i, *C_offd_i;
HYPRE_Int *rownnz_diag_C = NULL;
HYPRE_Int *rownnz_offd_C = NULL;
HYPRE_Int num_rownnz_diag_C;
HYPRE_Int num_rownnz_offd_C;
HYPRE_Int num_rows_diag_C = num_rows_diag_A;
HYPRE_Int num_cols_diag_C = num_cols_diag_A;
HYPRE_Int num_rows_offd_C = num_rows_offd_A;
HYPRE_Int num_cols_offd_C = num_cols_offd_A + num_cols_offd_B;
HYPRE_Int *twspace;
HYPRE_MemoryLocation memory_location_A = hypre_ParCSRMatrixMemoryLocation(A);
HYPRE_MemoryLocation memory_location_B = hypre_ParCSRMatrixMemoryLocation(B);
/* RL: TODO cannot guarantee, maybe should never assert
hypre_assert(memory_location_A == memory_location_B);
*/
/* RL: in the case of A=H, B=D, or A=D, B=H, let C = D,
* not sure if this is the right thing to do.
* Also, need something like this in other places
* TODO */
HYPRE_MemoryLocation memory_location_C = hypre_max(memory_location_A, memory_location_B);
HYPRE_ANNOTATE_FUNC_BEGIN;
hypre_assert(num_rows_A == num_rows_B);
hypre_assert(num_cols_A == num_cols_B);
hypre_assert(num_rows_diag_A == num_rows_diag_B);
hypre_assert(num_cols_diag_A == num_cols_diag_B);
/* Allocate memory */
twspace = hypre_TAlloc(HYPRE_Int, hypre_NumThreads(), HYPRE_MEMORY_HOST);
C_diag_i = hypre_CTAlloc(HYPRE_Int, num_rows_diag_A + 1, memory_location_C);
C_offd_i = hypre_CTAlloc(HYPRE_Int, num_rows_offd_A + 1, memory_location_C);
col_map_offd_C = hypre_TAlloc(HYPRE_BigInt, num_cols_offd_C, HYPRE_MEMORY_HOST);
/* Compute num_cols_offd_C, A2C_offd, and B2C_offd*/
A2C_offd = hypre_TAlloc(HYPRE_Int, num_cols_offd_A, HYPRE_MEMORY_HOST);
B2C_offd = hypre_TAlloc(HYPRE_Int, num_cols_offd_B, HYPRE_MEMORY_HOST);
hypre_union2(num_cols_offd_A, col_map_offd_A,
num_cols_offd_B, col_map_offd_B,
&num_cols_offd_C, col_map_offd_C,
A2C_offd, B2C_offd);
/* Set nonzero rows data of diag_C */
num_rownnz_diag_C = num_rows_diag_A;
if ((num_rownnz_diag_A < num_rows_diag_A) &&
(num_rownnz_diag_B < num_rows_diag_B))
{
hypre_MergeOrderedArrays( num_rownnz_diag_A, rownnz_diag_A,
num_rownnz_diag_B, rownnz_diag_B,
&num_rownnz_diag_C, &rownnz_diag_C);
}
/* Set nonzero rows data of offd_C */
num_rownnz_offd_C = num_rows_offd_A;
if ((num_rownnz_offd_A < num_rows_offd_A) &&
(num_rownnz_offd_B < num_rows_offd_B))
{
hypre_MergeOrderedArrays( num_rownnz_offd_A, rownnz_offd_A,
num_rownnz_offd_B, rownnz_offd_B,
&num_rownnz_offd_C, &rownnz_offd_C);
}
/* Set diag_C */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel
#endif
{
HYPRE_Int ii, num_threads;
HYPRE_Int size, rest, ns, ne;
HYPRE_Int *marker_diag;
HYPRE_Int *marker_offd;
ii = hypre_GetThreadNum();
num_threads = hypre_NumActiveThreads();
/*-----------------------------------------------------------------------
* Compute C_diag = alpha*A_diag + beta*B_diag
*-----------------------------------------------------------------------*/
size = num_rownnz_diag_C/num_threads;
rest = num_rownnz_diag_C - size*num_threads;
if (ii < rest)
{
ns = ii*size+ii;
ne = (ii+1)*size+ii+1;
}
else
{
ns = ii*size+rest;
ne = (ii+1)*size+rest;
}
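/* e.g. (illustrative): 10 rows over 3 threads gives size = 3, rest = 1,
 * so the per-thread row ranges are [0,4), [4,7) and [7,10). */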
marker_diag = hypre_TAlloc(HYPRE_Int, num_cols_diag_A, HYPRE_MEMORY_HOST);
hypre_CSRMatrixAddFirstPass(ns, ne, twspace, marker_diag,
NULL, NULL, A_diag, B_diag,
num_rows_diag_C, num_rownnz_diag_C,
num_cols_diag_C, rownnz_diag_C,
memory_location_C, C_diag_i, &C_diag);
hypre_CSRMatrixAddSecondPass(ns, ne, twspace, marker_diag,
NULL, NULL, rownnz_diag_C,
alpha, beta, A_diag, B_diag, C_diag);
hypre_TFree(marker_diag, HYPRE_MEMORY_HOST);
/*-----------------------------------------------------------------------
* Compute C_offd = alpha*A_offd + beta*B_offd
*-----------------------------------------------------------------------*/
size = num_rownnz_offd_C/num_threads;
rest = num_rownnz_offd_C - size*num_threads;
if (ii < rest)
{
ns = ii*size+ii;
ne = (ii+1)*size+ii+1;
}
else
{
ns = ii*size+rest;
ne = (ii+1)*size+rest;
}
marker_offd = hypre_TAlloc(HYPRE_Int, num_cols_offd_C, HYPRE_MEMORY_HOST);
hypre_CSRMatrixAddFirstPass(ns, ne, twspace, marker_offd,
A2C_offd, B2C_offd, A_offd, B_offd,
num_rows_offd_C, num_rownnz_offd_C,
num_cols_offd_C, rownnz_offd_C,
memory_location_C, C_offd_i, &C_offd);
hypre_CSRMatrixAddSecondPass(ns, ne, twspace, marker_offd,
A2C_offd, B2C_offd, rownnz_offd_C,
alpha, beta, A_offd, B_offd, C_offd);
hypre_TFree(marker_offd, HYPRE_MEMORY_HOST);
} /* end of omp parallel region */
/* Free memory */
hypre_TFree(twspace, HYPRE_MEMORY_HOST);
hypre_TFree(A2C_offd, HYPRE_MEMORY_HOST);
hypre_TFree(B2C_offd, HYPRE_MEMORY_HOST);
/* Create ParCSRMatrix C */
row_starts_C = hypre_TAlloc(HYPRE_BigInt, 2, HYPRE_MEMORY_HOST);
col_starts_C = hypre_TAlloc(HYPRE_BigInt, 2, HYPRE_MEMORY_HOST);
hypre_TMemcpy(row_starts_C, hypre_ParCSRMatrixRowStarts(A), HYPRE_BigInt, 2,
HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);
hypre_TMemcpy(col_starts_C, hypre_ParCSRMatrixColStarts(A), HYPRE_BigInt, 2,
HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);
C = hypre_ParCSRMatrixCreate(comm,
num_rows_A,
num_cols_A,
row_starts_C,
col_starts_C,
num_cols_offd_C,
hypre_CSRMatrixNumNonzeros(C_diag),
hypre_CSRMatrixNumNonzeros(C_offd));
hypre_CSRMatrixDestroy(hypre_ParCSRMatrixDiag(C));
hypre_CSRMatrixDestroy(hypre_ParCSRMatrixOffd(C));
hypre_ParCSRMatrixDiag(C) = C_diag;
hypre_ParCSRMatrixOffd(C) = C_offd;
hypre_ParCSRMatrixColMapOffd(C) = col_map_offd_C;
hypre_ParCSRMatrixSetNumNonzeros(C);
hypre_ParCSRMatrixDNumNonzeros(C) = (HYPRE_Real) hypre_ParCSRMatrixNumNonzeros(C);
/* create CommPkg of C */
hypre_MatvecCommPkgCreate(C);
*C_ptr = C;
HYPRE_ANNOTATE_FUNC_END;
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_ParCSRMatrixFnorm
*--------------------------------------------------------------------------*/
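/* Computes the Frobenius norm
 *    ||A||_F = sqrt( sum_p ( ||A_diag^(p)||_F^2 + ||A_offd^(p)||_F^2 ) )
 * where p runs over MPI ranks; diag and offd partition each rank's
 * nonzeros, so the squared local norms sum to the global squared norm. */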
HYPRE_Real
hypre_ParCSRMatrixFnorm( hypre_ParCSRMatrix *A )
{
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
HYPRE_Real f_diag, f_offd, local_result, result;
f_diag = hypre_CSRMatrixFnorm(hypre_ParCSRMatrixDiag(A));
f_offd = hypre_CSRMatrixFnorm(hypre_ParCSRMatrixOffd(A));
local_result = f_diag * f_diag + f_offd * f_offd;
hypre_MPI_Allreduce(&local_result, &result, 1, HYPRE_MPI_REAL, hypre_MPI_SUM, comm);
return sqrt(result);
}
/*--------------------------------------------------------------------------
* hypre_ExchangeExternalRowsInit
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_ExchangeExternalRowsInit( hypre_CSRMatrix *B_ext,
hypre_ParCSRCommPkg *comm_pkg_A,
void **request_ptr)
{
MPI_Comm comm = hypre_ParCSRCommPkgComm(comm_pkg_A);
HYPRE_Int num_recvs = hypre_ParCSRCommPkgNumRecvs(comm_pkg_A);
HYPRE_Int *recv_procs = hypre_ParCSRCommPkgRecvProcs(comm_pkg_A);
HYPRE_Int *recv_vec_starts = hypre_ParCSRCommPkgRecvVecStarts(comm_pkg_A);
HYPRE_Int num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg_A);
HYPRE_Int *send_procs = hypre_ParCSRCommPkgSendProcs(comm_pkg_A);
HYPRE_Int *send_map_starts = hypre_ParCSRCommPkgSendMapStarts(comm_pkg_A);
HYPRE_Int num_elmts_send = send_map_starts[num_sends];
HYPRE_Int num_elmts_recv = recv_vec_starts[num_recvs];
HYPRE_Int *B_ext_i = B_ext ? hypre_CSRMatrixI(B_ext) : NULL;
HYPRE_BigInt *B_ext_j = B_ext ? hypre_CSRMatrixBigJ(B_ext) : NULL;
HYPRE_Complex *B_ext_data = B_ext ? hypre_CSRMatrixData(B_ext) : NULL;
HYPRE_Int B_ext_ncols = B_ext ? hypre_CSRMatrixNumCols(B_ext) : 0;
HYPRE_Int B_ext_nrows = B_ext ? hypre_CSRMatrixNumRows(B_ext) : 0;
HYPRE_Int *B_ext_rownnz = hypre_CTAlloc(HYPRE_Int, B_ext_nrows, HYPRE_MEMORY_HOST);
hypre_assert(num_elmts_recv == B_ext_nrows);
/* output matrix */
hypre_CSRMatrix *B_int;
HYPRE_Int B_int_nrows = num_elmts_send;
HYPRE_Int B_int_ncols = B_ext_ncols;
HYPRE_Int *B_int_i = hypre_TAlloc(HYPRE_Int, B_int_nrows + 1, HYPRE_MEMORY_HOST);
HYPRE_BigInt *B_int_j = NULL;
HYPRE_Complex *B_int_data = NULL;
HYPRE_Int B_int_nnz;
hypre_ParCSRCommHandle *comm_handle, *comm_handle_j, *comm_handle_a;
hypre_ParCSRCommPkg *comm_pkg_j;
HYPRE_Int *jdata_recv_vec_starts;
HYPRE_Int *jdata_send_map_starts;
HYPRE_Int i;
HYPRE_Int num_procs;
void **vrequest;
hypre_MPI_Comm_size(comm, &num_procs);
jdata_send_map_starts = hypre_TAlloc(HYPRE_Int, num_sends+1, HYPRE_MEMORY_HOST);
/*--------------------------------------------------------------------------
* B_ext_rownnz contains the number of elements of row j
* (to be determined through send_map_elmnts on the receiving end)
*--------------------------------------------------------------------------*/
for (i = 0; i < B_ext_nrows; i++)
{
B_ext_rownnz[i] = B_ext_i[i+1] - B_ext_i[i];
}
/*--------------------------------------------------------------------------
* initialize communication: send/recv the row nnz
* (note the use of comm_pkg_A, mode 12, as in transpose matvec)
*--------------------------------------------------------------------------*/
comm_handle = hypre_ParCSRCommHandleCreate(12, comm_pkg_A, B_ext_rownnz, B_int_i + 1);
jdata_recv_vec_starts = hypre_TAlloc(HYPRE_Int, num_recvs + 1, HYPRE_MEMORY_HOST);
jdata_recv_vec_starts[0] = 0;
for (i = 1; i <= num_recvs; i++)
{
jdata_recv_vec_starts[i] = B_ext_i[recv_vec_starts[i]];
}
comm_pkg_j = hypre_CTAlloc(hypre_ParCSRCommPkg, 1, HYPRE_MEMORY_HOST);
hypre_ParCSRCommPkgComm(comm_pkg_j) = comm;
hypre_ParCSRCommPkgNumSends(comm_pkg_j) = num_recvs;
hypre_ParCSRCommPkgNumRecvs(comm_pkg_j) = num_sends;
hypre_ParCSRCommPkgSendProcs(comm_pkg_j) = recv_procs;
hypre_ParCSRCommPkgRecvProcs(comm_pkg_j) = send_procs;
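/* Note (added for clarity): comm_pkg_j reverses the roles in comm_pkg_A.
 * The rows of B_ext live on the recv side of A's communication pattern, so
 * returning them to their owners means sending along A's recv edges and
 * receiving along A's send edges. */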
hypre_ParCSRCommHandleDestroy(comm_handle);
/*--------------------------------------------------------------------------
* compute B_int: row nnz to row ptrs
*--------------------------------------------------------------------------*/
B_int_i[0] = 0;
for (i = 1; i <= B_int_nrows; i++)
{
B_int_i[i] += B_int_i[i-1];
}
B_int_nnz = B_int_i[B_int_nrows];
B_int_j = hypre_TAlloc(HYPRE_BigInt, B_int_nnz, HYPRE_MEMORY_HOST);
B_int_data = hypre_TAlloc(HYPRE_Complex, B_int_nnz, HYPRE_MEMORY_HOST);
for (i = 0; i <= num_sends; i++)
{
jdata_send_map_starts[i] = B_int_i[send_map_starts[i]];
}
/* note the order of send/recv is reversed */
hypre_ParCSRCommPkgRecvVecStarts(comm_pkg_j) = jdata_send_map_starts;
hypre_ParCSRCommPkgSendMapStarts(comm_pkg_j) = jdata_recv_vec_starts;
/* send/recv CSR rows */
comm_handle_a = hypre_ParCSRCommHandleCreate( 1, comm_pkg_j, B_ext_data, B_int_data);
comm_handle_j = hypre_ParCSRCommHandleCreate(21, comm_pkg_j, B_ext_j, B_int_j);
/* create CSR */
B_int = hypre_CSRMatrixCreate(B_int_nrows, B_int_ncols, B_int_nnz);
hypre_CSRMatrixMemoryLocation(B_int) = HYPRE_MEMORY_HOST;
hypre_CSRMatrixI(B_int) = B_int_i;
hypre_CSRMatrixBigJ(B_int) = B_int_j;
hypre_CSRMatrixData(B_int) = B_int_data;
/* output */
vrequest = hypre_TAlloc(void *, 4, HYPRE_MEMORY_HOST);
vrequest[0] = (void *) comm_handle_j;
vrequest[1] = (void *) comm_handle_a;
vrequest[2] = (void *) B_int;
vrequest[3] = (void *) comm_pkg_j;
*request_ptr = (void *) vrequest;
hypre_TFree(B_ext_rownnz, HYPRE_MEMORY_HOST);
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_ExchangeExternalRowsWait
*--------------------------------------------------------------------------*/
hypre_CSRMatrix*
hypre_ExchangeExternalRowsWait(void *vrequest)
{
void **request = (void **) vrequest;
hypre_ParCSRCommHandle *comm_handle_j = (hypre_ParCSRCommHandle *) request[0];
hypre_ParCSRCommHandle *comm_handle_a = (hypre_ParCSRCommHandle *) request[1];
hypre_CSRMatrix *B_int = (hypre_CSRMatrix *) request[2];
hypre_ParCSRCommPkg *comm_pkg_j = (hypre_ParCSRCommPkg *) request[3];
/* communication done */
hypre_ParCSRCommHandleDestroy(comm_handle_a);
hypre_ParCSRCommHandleDestroy(comm_handle_j);
hypre_TFree(hypre_ParCSRCommPkgSendMapStarts(comm_pkg_j), HYPRE_MEMORY_HOST);
hypre_TFree(hypre_ParCSRCommPkgRecvVecStarts(comm_pkg_j), HYPRE_MEMORY_HOST);
hypre_TFree(comm_pkg_j, HYPRE_MEMORY_HOST);
hypre_TFree(request, HYPRE_MEMORY_HOST);
return B_int;
}
/*--------------------------------------------------------------------------
* hypre_ParCSRMatrixExtractSubmatrixFC
*
* extract the submatrix A_{FF}, A_{FC}, A_{CF} or A_{CC} of A,
* selected by the string job = "FF", "FC", "CF" or "CC"
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_ParCSRMatrixExtractSubmatrixFC( hypre_ParCSRMatrix *A,
HYPRE_Int *CF_marker,
HYPRE_BigInt *cpts_starts_in,
const char *job,
hypre_ParCSRMatrix **B_ptr,
HYPRE_Real strength_thresh)
{
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
hypre_ParCSRCommHandle *comm_handle;
/* diag part of A */
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
HYPRE_Complex *A_diag_a = hypre_CSRMatrixData(A_diag);
HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
/* off-diag part of A */
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
HYPRE_Complex *A_offd_a = hypre_CSRMatrixData(A_offd);
HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd);
//HYPRE_Int *col_map_offd_A = hypre_ParCSRMatrixColMapOffd(A);
hypre_ParCSRMatrix *B;
hypre_CSRMatrix *B_diag, *B_offd;
HYPRE_Real *B_maxel_row;
HYPRE_Int *B_diag_i, *B_diag_j, *B_offd_i, *B_offd_j;
HYPRE_Complex *B_diag_a, *B_offd_a;
HYPRE_Int num_cols_B_offd;
HYPRE_BigInt *col_map_offd_B;
HYPRE_Int i, j, k, k1, k2;
HYPRE_BigInt B_nrow_global, B_ncol_global;
HYPRE_Int A_nlocal, B_nrow_local, B_ncol_local,
B_nnz_diag, B_nnz_offd;
HYPRE_BigInt total_global_fpts, total_global_cpts, *fpts_starts, *cpts_starts;
HYPRE_Int nf_local, nc_local;
HYPRE_Int row_set, col_set;
HYPRE_BigInt *B_row_starts, *B_col_starts, B_first_col;
HYPRE_Int my_id, num_procs,
*sub_idx_diag, *sub_idx_offd;
HYPRE_Int num_sends, *send_buf_data;
/* MPI size and rank*/
hypre_MPI_Comm_size(comm, &num_procs);
hypre_MPI_Comm_rank(comm, &my_id);
row_set = job[0] == 'F' ? -1 : 1;
col_set = job[1] == 'F' ? -1 : 1;
A_nlocal = hypre_CSRMatrixNumRows(A_diag);
/*-------------- global number of C points and local C points
* assuming cpts_starts is given */
if (row_set == 1 || col_set == 1)
{
/* copy cpts_starts first */
HYPRE_Int len = 2;
cpts_starts = hypre_TAlloc(HYPRE_BigInt, len, HYPRE_MEMORY_HOST);
hypre_TMemcpy(cpts_starts, cpts_starts_in, HYPRE_BigInt, len,
HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);
if (my_id == (num_procs -1))
{
total_global_cpts = cpts_starts[1];
}
hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_INT, num_procs-1, comm);
nc_local = (HYPRE_Int)(cpts_starts[1] - cpts_starts[0]);
}
/*-------------- global number of F points, local F points, and F starts */
if (row_set == -1 || col_set == -1)
{
nf_local = 0;
for (i = 0; i < A_nlocal; i++)
{
if (CF_marker[i] < 0)
{
nf_local++;
}
}
fpts_starts = hypre_TAlloc(HYPRE_BigInt, 2, HYPRE_MEMORY_HOST);
/* scan a HYPRE_BigInt copy of nf_local so the buffer type matches HYPRE_MPI_BIG_INT */
HYPRE_BigInt big_nf_local = (HYPRE_BigInt) nf_local;
hypre_MPI_Scan(&big_nf_local, fpts_starts+1, 1, HYPRE_MPI_BIG_INT, hypre_MPI_SUM, comm);
fpts_starts[0] = fpts_starts[1] - (HYPRE_BigInt) nf_local;
if (my_id == num_procs - 1)
{
total_global_fpts = fpts_starts[1];
}
hypre_MPI_Bcast(&total_global_fpts, 1, HYPRE_MPI_INT, num_procs-1, comm);
}
if (row_set == -1 && col_set == -1)
{
/* FF */
B_nrow_local = nf_local;
B_ncol_local = nf_local;
B_nrow_global = total_global_fpts;
B_ncol_global = total_global_fpts;
B_row_starts = B_col_starts = fpts_starts;
}
else if (row_set == -1 && col_set == 1)
{
/* FC */
B_nrow_local = nf_local;
B_ncol_local = nc_local;
B_nrow_global = total_global_fpts;
B_ncol_global = total_global_cpts;
B_row_starts = fpts_starts;
B_col_starts = cpts_starts;
}
else if (row_set == 1 && col_set == -1)
{
/* CF */
B_nrow_local = nc_local;
B_ncol_local = nf_local;
B_nrow_global = total_global_cpts;
B_ncol_global = total_global_fpts;
B_row_starts = cpts_starts;
B_col_starts = fpts_starts;
}
else
{
/* CC */
B_nrow_local = nc_local;
B_ncol_local = nc_local;
B_nrow_global = total_global_cpts;
B_ncol_global = total_global_cpts;
B_row_starts = B_col_starts = cpts_starts;
}
/* global index of my first col */
B_first_col = B_col_starts[0];
/* sub_idx_diag: [local] mapping from F+C to F/C, if not selected, be -1 */
sub_idx_diag = hypre_TAlloc(HYPRE_Int, A_nlocal, HYPRE_MEMORY_HOST);
for (i = 0, k = 0; i < A_nlocal; i++)
{
HYPRE_Int CF_i = CF_marker[i] > 0 ? 1 : -1;
if (CF_i == col_set)
{
sub_idx_diag[i] = k++;
}
else
{
sub_idx_diag[i] = -1;
}
}
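/* e.g. (illustrative): CF_marker = {1,-1,-1,1} with col_set = 1 ("C")
 * gives sub_idx_diag = {0,-1,-1,1}: the two C points receive the new
 * local column ids 0 and 1. */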
hypre_assert(k == B_ncol_local);
num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
send_buf_data = hypre_TAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends),
HYPRE_MEMORY_HOST);
k = 0;
for (i = 0; i < num_sends; i++)
{
/* start pos of elements sent to send_proc[i] */
HYPRE_Int si = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
HYPRE_Int ei = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1);
/* loop through all elems to send_proc[i] */
for (j = si; j < ei; j++)
{
/* j1: local idx */
HYPRE_Int j1 = sub_idx_diag[hypre_ParCSRCommPkgSendMapElmt(comm_pkg, j)];
if (j1 != -1)
{
/* adjust j1 to B global idx */
j1 += B_first_col;
}
send_buf_data[k++] = j1;
}
}
hypre_assert(k == hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends));
/* recv buffer */
sub_idx_offd = hypre_TAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
/* create a handle to start communication. 11: for integer */
comm_handle = hypre_ParCSRCommHandleCreate(11, comm_pkg, send_buf_data, sub_idx_offd);
/* destroy the handle to finish communication */
hypre_ParCSRCommHandleDestroy(comm_handle);
for (i = 0, num_cols_B_offd = 0; i < num_cols_A_offd; i++)
{
if (sub_idx_offd[i] != -1)
{
num_cols_B_offd ++;
}
}
col_map_offd_B = hypre_TAlloc(HYPRE_BigInt, num_cols_B_offd, HYPRE_MEMORY_HOST);
for (i = 0, k = 0; i < num_cols_A_offd; i++)
{
if (sub_idx_offd[i] != -1)
{
col_map_offd_B[k] = sub_idx_offd[i];
sub_idx_offd[i] = k++;
}
}
hypre_assert(k == num_cols_B_offd);
/* count nnz and set ia */
B_nnz_diag = B_nnz_offd = 0;
B_maxel_row = hypre_TAlloc(HYPRE_Real, B_nrow_local, HYPRE_MEMORY_HOST);
B_diag_i = hypre_TAlloc(HYPRE_Int, B_nrow_local+1, HYPRE_MEMORY_HOST);
B_offd_i = hypre_TAlloc(HYPRE_Int, B_nrow_local+1, HYPRE_MEMORY_HOST);
B_diag_i[0] = B_offd_i[0] = 0;
for (i = 0, k = 0; i < A_nlocal; i++)
{
HYPRE_Int CF_i = CF_marker[i] > 0 ? 1 : -1;
if (CF_i != row_set)
{
continue;
}
k++;
// Get max abs-value element of this row
HYPRE_Real temp_max = 0;
if (strength_thresh > 0) {
for (j = A_diag_i[i]+1; j < A_diag_i[i+1]; j++)
{
if (hypre_cabs(A_diag_a[j]) > temp_max)
{
temp_max = hypre_cabs(A_diag_a[j]);
}
}
for (j = A_offd_i[i]; j < A_offd_i[i+1]; j++)
{
if (hypre_cabs(A_offd_a[j]) > temp_max)
{
temp_max = hypre_cabs(A_offd_a[j]);
}
}
}
B_maxel_row[k-1] = temp_max;
// add one for diagonal element
j = A_diag_i[i];
if (sub_idx_diag[A_diag_j[j]] != -1)
{
B_nnz_diag++;
}
// Count nnzs larger than tolerance times max row element
for (j = A_diag_i[i]+1; j < A_diag_i[i+1]; j++)
{
if ( (sub_idx_diag[A_diag_j[j]] != -1) &&
(hypre_cabs(A_diag_a[j]) > (strength_thresh*temp_max)) )
{
B_nnz_diag++;
}
}
for (j = A_offd_i[i]; j < A_offd_i[i+1]; j++)
{
if ( (sub_idx_offd[A_offd_j[j]] != -1) &&
(hypre_cabs(A_offd_a[j]) > (strength_thresh*temp_max)) )
{
B_nnz_offd++;
}
}
B_diag_i[k] = B_nnz_diag;
B_offd_i[k] = B_nnz_offd;
}
hypre_assert(k == B_nrow_local);
B_diag_j = hypre_TAlloc(HYPRE_Int, B_nnz_diag, HYPRE_MEMORY_HOST);
B_diag_a = hypre_TAlloc(HYPRE_Complex, B_nnz_diag, HYPRE_MEMORY_HOST);
B_offd_j = hypre_TAlloc(HYPRE_Int, B_nnz_offd, HYPRE_MEMORY_HOST);
B_offd_a = hypre_TAlloc(HYPRE_Complex, B_nnz_offd, HYPRE_MEMORY_HOST);
for (i = 0, k=0, k1 = 0, k2 = 0; i < A_nlocal; i++)
{
HYPRE_Int CF_i = CF_marker[i] > 0 ? 1 : -1;
if (CF_i != row_set)
{
continue;
}
HYPRE_Real maxel = B_maxel_row[k];
k++;
for (j = A_diag_i[i]; j < A_diag_i[i+1]; j++)
{
HYPRE_Int j1 = sub_idx_diag[A_diag_j[j]];
if ( (j1 != -1) && ( (hypre_cabs(A_diag_a[j]) > (strength_thresh*maxel)) || j==A_diag_i[i] ) )
{
B_diag_j[k1] = j1;
B_diag_a[k1] = A_diag_a[j];
k1++;
}
}
for (j = A_offd_i[i]; j < A_offd_i[i+1]; j++)
{
HYPRE_Int j1 = sub_idx_offd[A_offd_j[j]];
if ((j1 != -1) && (hypre_cabs(A_offd_a[j]) > (strength_thresh*maxel)))
{
hypre_assert(j1 >= 0 && j1 < num_cols_B_offd);
B_offd_j[k2] = j1;
B_offd_a[k2] = A_offd_a[j];
k2++;
}
}
}
hypre_assert(k1 == B_nnz_diag && k2 == B_nnz_offd);
/* ready to create B = A(rowset, colset) */
B = hypre_ParCSRMatrixCreate(comm,
B_nrow_global,
B_ncol_global,
B_row_starts,
B_col_starts,
num_cols_B_offd,
B_nnz_diag,
B_nnz_offd);
B_diag = hypre_ParCSRMatrixDiag(B);
hypre_CSRMatrixMemoryLocation(B_diag) = HYPRE_MEMORY_HOST;
hypre_CSRMatrixData(B_diag) = B_diag_a;
hypre_CSRMatrixI(B_diag) = B_diag_i;
hypre_CSRMatrixJ(B_diag) = B_diag_j;
B_offd = hypre_ParCSRMatrixOffd(B);
hypre_CSRMatrixMemoryLocation(B_offd) = HYPRE_MEMORY_HOST;
hypre_CSRMatrixData(B_offd) = B_offd_a;
hypre_CSRMatrixI(B_offd) = B_offd_i;
hypre_CSRMatrixJ(B_offd) = B_offd_j;
hypre_ParCSRMatrixColMapOffd(B) = col_map_offd_B;
hypre_ParCSRMatrixSetNumNonzeros(B);
hypre_ParCSRMatrixDNumNonzeros(B) = (HYPRE_Real) hypre_ParCSRMatrixNumNonzeros(B);
hypre_MatvecCommPkgCreate(B);
*B_ptr = B;
hypre_TFree(B_maxel_row, HYPRE_MEMORY_HOST);
hypre_TFree(send_buf_data, HYPRE_MEMORY_HOST);
hypre_TFree(sub_idx_diag, HYPRE_MEMORY_HOST);
hypre_TFree(sub_idx_offd, HYPRE_MEMORY_HOST);
return hypre_error_flag;
}
|
graph.h | // Copyright (c) 2015, The Regents of the University of California (Regents)
// See LICENSE.txt for license details
#ifndef GRAPH_H_
#define GRAPH_H_
#include <cinttypes>
#include <iostream>
#include <type_traits>
#include <algorithm>
#include "pvector.h"
#include "util.h"
/*
GAP Benchmark Suite
Class: CSRGraph
Author: Scott Beamer
Simple container for graph in CSR format
- Intended to be constructed by a Builder
- To make weighted, set DestID_ template type to NodeWeight
- MakeInverse parameter controls whether graph stores its inverse
*/
// Used to hold node & weight, with another node it makes a weighted edge
template <typename NodeID_, typename WeightT_>
struct NodeWeight {
NodeID_ v;
WeightT_ w;
NodeWeight() {}
NodeWeight(NodeID_ v) : v(v), w(1) {}
NodeWeight(NodeID_ v, WeightT_ w) : v(v), w(w) {}
bool operator< (const NodeWeight& rhs) const {
return v == rhs.v ? w < rhs.w : v < rhs.v;
}
// doesn't check WeightT_s, needed to remove duplicate edges
bool operator== (const NodeWeight& rhs) const {
return v == rhs.v;
}
// doesn't check WeightT_s, needed to remove self edges
bool operator== (const NodeID_& rhs) const {
return v == rhs;
}
operator NodeID_() {
return v;
}
};
template <typename NodeID_, typename WeightT_>
std::ostream& operator<<(std::ostream& os,
const NodeWeight<NodeID_, WeightT_>& nw) {
os << nw.v << " " << nw.w;
return os;
}
template <typename NodeID_, typename WeightT_>
std::istream& operator>>(std::istream& is, NodeWeight<NodeID_, WeightT_>& nw) {
is >> nw.v >> nw.w;
return is;
}
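// Usage sketch (illustrative): a NodeWeight pairs a destination with a
// weight, so a weighted CSR stores NodeWeight entries as its DestID_ type:
//   NodeWeight<int32_t, int32_t> nw(7, 3);  // neighbor 7, weight 3
//   std::cout << nw << std::endl;           // prints "7 3"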
// Syntactic sugar for an edge
template <typename SrcT, typename DstT = SrcT>
struct EdgePair {
SrcT u;
DstT v;
EdgePair() {}
EdgePair(SrcT u, DstT v) : u(u), v(v) {}
};
// SG = serialized graph, these types are for writing graph to file
typedef int32_t SGID;
typedef EdgePair<SGID> SGEdge;
typedef int64_t SGOffset;
template <class NodeID_, class DestID_ = NodeID_, bool MakeInverse = true>
class CSRGraph {
// Used to access neighbors of vertex, basically sugar for iterators
class Neighborhood {
NodeID_ n_;
DestID_** g_index_;
public:
Neighborhood(NodeID_ n, DestID_** g_index) : n_(n), g_index_(g_index) {}
typedef DestID_* iterator;
iterator begin() { return g_index_[n_]; }
iterator end() { return g_index_[n_+1]; }
};
void ReleaseResources() {
if (out_index_ != nullptr)
delete[] out_index_;
if (out_neighbors_ != nullptr)
delete[] out_neighbors_;
if (directed_) {
if (in_index_ != nullptr)
delete[] in_index_;
if (in_neighbors_ != nullptr)
delete[] in_neighbors_;
}
}
public:
CSRGraph() : directed_(false), num_nodes_(-1), num_edges_(-1),
out_index_(nullptr), out_neighbors_(nullptr),
in_index_(nullptr), in_neighbors_(nullptr) {}
CSRGraph(int64_t num_nodes, DestID_** index, DestID_* neighs) :
directed_(false), num_nodes_(num_nodes),
out_index_(index), out_neighbors_(neighs),
in_index_(index), in_neighbors_(neighs) {
num_edges_ = (out_index_[num_nodes_] - out_index_[0]) / 2;
}
CSRGraph(int64_t num_nodes, DestID_** out_index, DestID_* out_neighs,
DestID_** in_index, DestID_* in_neighs) :
directed_(true), num_nodes_(num_nodes),
out_index_(out_index), out_neighbors_(out_neighs),
in_index_(in_index), in_neighbors_(in_neighs) {
if (out_index_ != nullptr)
num_edges_ = out_index_[num_nodes_] - out_index_[0];
else
num_edges_ = in_index_[num_nodes_] - in_index_[0];
}
CSRGraph(CSRGraph&& other) : directed_(other.directed_),
num_nodes_(other.num_nodes_), num_edges_(other.num_edges_),
out_index_(other.out_index_), out_neighbors_(other.out_neighbors_),
in_index_(other.in_index_), in_neighbors_(other.in_neighbors_) {
other.num_edges_ = -1;
other.num_nodes_ = -1;
other.out_index_ = nullptr;
other.out_neighbors_ = nullptr;
other.in_index_ = nullptr;
other.in_neighbors_ = nullptr;
}
~CSRGraph() {
ReleaseResources();
}
CSRGraph& operator=(CSRGraph&& other) {
if (this != &other) {
ReleaseResources();
directed_ = other.directed_;
num_edges_ = other.num_edges_;
num_nodes_ = other.num_nodes_;
out_index_ = other.out_index_;
out_neighbors_ = other.out_neighbors_;
in_index_ = other.in_index_;
in_neighbors_ = other.in_neighbors_;
other.num_edges_ = -1;
other.num_nodes_ = -1;
other.out_index_ = nullptr;
other.out_neighbors_ = nullptr;
other.in_index_ = nullptr;
other.in_neighbors_ = nullptr;
}
return *this;
}
bool directed() const {
return directed_;
}
int64_t num_nodes() const {
return num_nodes_;
}
int64_t num_edges() const {
return num_edges_;
}
int64_t num_edges_directed() const {
return directed_ ? num_edges_ : 2*num_edges_;
}
int64_t out_degree(NodeID_ v) const {
return out_index_[v+1] - out_index_[v];
}
int64_t in_degree(NodeID_ v) const {
static_assert(MakeInverse, "Graph inversion disabled but reading inverse");
return in_index_[v+1] - in_index_[v];
}
Neighborhood out_neigh(NodeID_ n) const {
return Neighborhood(n, out_index_);
}
// Is m a neighbor of n?
bool isNeighbor(NodeID_ n, NodeID_ m) const {
return std::binary_search(out_index_[n], out_index_[n+1], m);
}
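// Note (added for clarity): relies on each neighbor list being sorted
// (binary search over [out_index_[n], out_index_[n+1])), so the lookup
// costs O(log out_degree(n)).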
Neighborhood in_neigh(NodeID_ n) const {
static_assert(MakeInverse, "Graph inversion disabled but reading inverse");
return Neighborhood(n, in_index_);
}
void PrintStats() const {
std::cout << "Graph has " << num_nodes_ << " nodes and "
<< num_edges_ << " ";
if (!directed_)
std::cout << "un";
std::cout << "directed edges for degree: ";
std::cout << num_edges_/num_nodes_ << std::endl;
}
void PrintTopology() const {
for (NodeID_ i=0; i < num_nodes_; i++) {
std::cout << i << ": ";
for (DestID_ j : out_neigh(i)) {
std::cout << j << " ";
}
std::cout << std::endl;
}
}
static DestID_** GenIndex(const pvector<SGOffset> &offsets, DestID_* neighs) {
NodeID_ length = offsets.size();
DestID_** index = new DestID_*[length];
#pragma omp parallel for
for (NodeID_ n=0; n < length; n++)
index[n] = neighs + offsets[n];
return index;
}
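// Worked example (illustrative): neighs = {1,3, 0, 2} with
// offsets = {0,2,3,4} yields index[i] = neighs + offsets[i], so node 0's
// neighbors are [index[0], index[1]) = {1,3} and node 2's are {2}.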
#if 0
static DestID_** relabelIndex(const pvector<SGOffset> &offsets, DestID_* neighs, std::map<NodeID_, int64_t> reMap) {
NodeID_ length = offsets.size();
DestID_** index = new DestID_*[length];
#pragma omp parallel for
for (NodeID_ n=0; n < length; n++)
index[n] = neighs + offsets[n];
return index;
}
#endif
pvector<SGOffset> VertexOffsets(bool in_graph = false) const {
pvector<SGOffset> offsets(num_nodes_+1);
for (NodeID_ n=0; n < num_nodes_+1; n++)
if (in_graph)
offsets[n] = in_index_[n] - in_index_[0];
else
offsets[n] = out_index_[n] - out_index_[0];
return offsets;
}
Range<NodeID_> vertices() const {
return Range<NodeID_>(num_nodes());
}
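// Typical traversal sketch (illustrative, assuming a graph g of this type;
// Process is a hypothetical callback):
//   for (NodeID_ u : g.vertices())
//     for (DestID_ v : g.out_neigh(u))
//       Process(u, v);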
DestID_** returnOffsetsArray()
{
//PageRank specific
return in_index_;
}
DestID_* returnCoordsArray()
{
//PageRank specific
return in_neighbors_;
}
DestID_** returnOffsetsArrayForCSR()
{
//PageRank specific
return out_index_;
}
DestID_* returnCoordsArrayForCSR()
{
//PageRank specific
return out_neighbors_;
}
DestID_ accessOutNeighArray(NodeID_ pos)
{
return out_neighbors_[pos];
}
DestID_ accessInNeighArray(NodeID_ pos)
{
return in_neighbors_[pos];
}
private:
bool directed_;
int64_t num_nodes_;
int64_t num_edges_;
DestID_** out_index_;
DestID_* out_neighbors_;
DestID_** in_index_;
DestID_* in_neighbors_;
};
#endif // GRAPH_H_
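// A minimal usage sketch (hypothetical, not part of graph.h) showing how the
// accessors above are typically consumed; it assumes a CSRGraph<int32_t> has
// already been built by a builder. Wrapped in #if 0, matching the file's own
// convention for illustrative-but-disabled code.
#if 0
static int64_t TotalOutDegree(const CSRGraph<int32_t> &g) {
  int64_t total = 0;
  #pragma omp parallel for reduction(+ : total)
  for (int32_t n = 0; n < g.num_nodes(); n++)
    total += g.out_degree(n);   // out_index_[n+1] - out_index_[n]
  return total;                 // equals g.num_edges_directed()
}
#endif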
|
image-view.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% IIIII M M AAA GGGG EEEEE %
% I MM MM A A G E %
% I M M M AAAAA G GG EEE %
% I M M A A G G E %
% IIIII M M A A GGGG EEEEE %
% %
% V V IIIII EEEEE W W %
% V V I E W W %
% V V I EEE W W W %
% V V I E WW WW %
% V IIIII EEEEE W W %
% %
% %
% MagickCore Image View Methods %
% %
% Software Design %
% John Cristy %
% March 2003 %
% %
% %
% Copyright 1999-2012 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% http://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/MagickCore.h"
#include "magick/exception-private.h"
#include "magick/monitor-private.h"
#include "magick/thread-private.h"
/*
Typedef declarations.
*/
struct _ImageView
{
char
*description;
RectangleInfo
extent;
Image
*image;
CacheView
*view;
size_t
number_threads;
ExceptionInfo
*exception;
MagickBooleanType
debug;
size_t
signature;
};
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l o n e I m a g e V i e w %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CloneImageView() makes a copy of the specified image view.
%
% The format of the CloneImageView method is:
%
% ImageView *CloneImageView(const ImageView *image_view)
%
% A description of each parameter follows:
%
% o image_view: the image view.
%
*/
MagickExport ImageView *CloneImageView(const ImageView *image_view)
{
ImageView
*clone_view;
assert(image_view != (ImageView *) NULL);
assert(image_view->signature == MagickSignature);
clone_view=(ImageView *) AcquireMagickMemory(sizeof(*clone_view));
if (clone_view == (ImageView *) NULL)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
(void) ResetMagickMemory(clone_view,0,sizeof(*clone_view));
clone_view->description=ConstantString(image_view->description);
clone_view->extent=image_view->extent;
clone_view->view=CloneCacheView(image_view->view);
clone_view->number_threads=image_view->number_threads;
clone_view->exception=AcquireExceptionInfo();
InheritException(clone_view->exception,image_view->exception);
clone_view->debug=image_view->debug;
clone_view->signature=MagickSignature;
return(clone_view);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y I m a g e V i e w %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyImageView() deallocates memory associated with an image view.
%
% The format of the DestroyImageView method is:
%
% ImageView *DestroyImageView(ImageView *image_view)
%
% A description of each parameter follows:
%
% o image_view: the image view.
%
*/
MagickExport ImageView *DestroyImageView(ImageView *image_view)
{
assert(image_view != (ImageView *) NULL);
assert(image_view->signature == MagickSignature);
if (image_view->description != (char *) NULL)
image_view->description=DestroyString(image_view->description);
image_view->view=DestroyCacheView(image_view->view);
image_view->exception=DestroyExceptionInfo(image_view->exception);
image_view->signature=(~MagickSignature);
image_view=(ImageView *) RelinquishMagickMemory(image_view);
return(image_view);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D u p l e x T r a n s f e r I m a g e V i e w I t e r a t o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DuplexTransferImageViewIterator() iterates over three image views in
% parallel and calls your transfer method for each scanline of the view. The
% source and duplex pixel extents are not confined to the image canvas-- that
% is, you can include negative offsets, or widths or heights that exceed the
% image dimensions. However, the destination image view is confined to the
% image canvas-- that is, no negative offsets or widths or heights that exceed
% the image dimensions are permitted.
%
% The callback signature is:
%
% MagickBooleanType DuplexTransferImageViewMethod(const ImageView *source,
% const ImageView *duplex,ImageView *destination,const ssize_t y,
% const int thread_id,void *context)
%
% Use this pragma if the view is not single threaded:
%
% #pragma omp critical
%
% to define a section of code in your callback transfer method that must be
% executed by a single thread at a time.
%
% The format of the DuplexTransferImageViewIterator method is:
%
% MagickBooleanType DuplexTransferImageViewIterator(ImageView *source,
% ImageView *duplex,ImageView *destination,
% DuplexTransferImageViewMethod transfer,void *context)
%
% A description of each parameter follows:
%
% o source: the source image view.
%
% o duplex: the duplex image view.
%
% o destination: the destination image view.
%
% o transfer: the transfer callback method.
%
% o context: the user defined context.
%
*/
MagickExport MagickBooleanType DuplexTransferImageViewIterator(
ImageView *source,ImageView *duplex,ImageView *destination,
DuplexTransferImageViewMethod transfer,void *context)
{
ExceptionInfo
*exception;
Image
*destination_image,
*source_image;
MagickBooleanType
status;
MagickOffsetType
progress;
ssize_t
y;
assert(source != (ImageView *) NULL);
assert(source->signature == MagickSignature);
if (transfer == (DuplexTransferImageViewMethod) NULL)
return(MagickFalse);
source_image=source->image;
destination_image=destination->image;
if (SetImageStorageClass(destination_image,DirectClass) == MagickFalse)
return(MagickFalse);
status=MagickTrue;
progress=0;
exception=destination->exception;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) num_threads(source->number_threads)
#endif
for (y=source->extent.y; y < (ssize_t) source->extent.height; y++)
{
const int
id = GetOpenMPThreadId();
MagickBooleanType
sync;
register const PixelPacket
*restrict duplex_pixels,
*restrict pixels;
register PixelPacket
*restrict destination_pixels;
if (status == MagickFalse)
continue;
pixels=GetCacheViewVirtualPixels(source->view,source->extent.x,y,
source->extent.width,1,source->exception);
if (pixels == (const PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
duplex_pixels=GetCacheViewVirtualPixels(duplex->view,duplex->extent.x,y,
duplex->extent.width,1,duplex->exception);
if (duplex_pixels == (const PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
destination_pixels=GetCacheViewAuthenticPixels(destination->view,
destination->extent.x,y,destination->extent.width,1,exception);
if (destination_pixels == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
if (transfer(source,duplex,destination,y,id,context) == MagickFalse)
status=MagickFalse;
sync=SyncCacheViewAuthenticPixels(destination->view,exception);
if (sync == MagickFalse)
{
InheritException(destination->exception,GetCacheViewException(
source->view));
status=MagickFalse;
}
if (source_image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_DuplexTransferImageViewIterator)
#endif
proceed=SetImageProgress(source_image,source->description,progress++,
source->extent.height);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
return(status);
}
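/*
  A hedged sketch (not part of MagickCore, wrapped in #if 0) of a duplex
  transfer callback for the iterator above: it averages the source and duplex
  scanlines into the destination. The function name is hypothetical; it relies
  on the iterator having already fetched each view's scanline queue.
*/
#if 0
static MagickBooleanType AverageScanlines(const ImageView *source,
  const ImageView *duplex,ImageView *destination,const ssize_t y,
  const int thread_id,void *context)
{
  const PixelPacket
    *duplex_pixels,
    *source_pixels;

  PixelPacket
    *destination_pixels;

  ssize_t
    x;

  source_pixels=GetImageViewVirtualPixels(source);
  duplex_pixels=GetImageViewVirtualPixels(duplex);
  destination_pixels=GetImageViewAuthenticPixels(destination);
  for (x=0; x < (ssize_t) GetImageViewExtent(source).width; x++)
  {
    destination_pixels[x].red=(source_pixels[x].red+duplex_pixels[x].red)/2;
    destination_pixels[x].green=(source_pixels[x].green+
      duplex_pixels[x].green)/2;
    destination_pixels[x].blue=(source_pixels[x].blue+duplex_pixels[x].blue)/2;
  }
  return(MagickTrue);
}
#endif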
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e V i e w A u t h e n t i c I n d e x e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageViewAuthenticIndexes() returns the image view authentic indexes.
%
% The format of the GetImageViewAuthenticIndexes method is:
%
% IndexPacket *GetImageViewAuthenticIndexes(const ImageView *image_view)
%
% A description of each parameter follows:
%
% o image_view: the image view.
%
*/
MagickExport IndexPacket *GetImageViewAuthenticIndexes(
const ImageView *image_view)
{
assert(image_view != (ImageView *) NULL);
assert(image_view->signature == MagickSignature);
return(GetCacheViewAuthenticIndexQueue(image_view->view));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e V i e w A u t h e n t i c P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageViewAuthenticPixels() returns the image view authentic pixels.
%
% The format of the GetImageViewAuthenticPixels method is:
%
% PixelPacket *GetImageViewAuthenticPixels(const ImageView *image_view)
%
% A description of each parameter follows:
%
% o image_view: the image view.
%
*/
MagickExport PixelPacket *GetImageViewAuthenticPixels(
const ImageView *image_view)
{
assert(image_view != (ImageView *) NULL);
assert(image_view->signature == MagickSignature);
return(GetCacheViewAuthenticPixelQueue(image_view->view));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e V i e w E x c e p t i o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageViewException() returns the severity, reason, and description of any
% error that occurs when utilizing an image view.
%
% The format of the GetImageViewException method is:
%
% char *GetImageViewException(const ImageView *image_view,
% ExceptionType *severity)
%
% A description of each parameter follows:
%
% o image_view: the image view.
%
% o severity: the severity of the error is returned here.
%
*/
MagickExport char *GetImageViewException(const ImageView *image_view,
ExceptionType *severity)
{
char
*description;
assert(image_view != (const ImageView *) NULL);
assert(image_view->signature == MagickSignature);
assert(severity != (ExceptionType *) NULL);
*severity=image_view->exception->severity;
description=(char *) AcquireQuantumMemory(2UL*MaxTextExtent,
sizeof(*description));
if (description == (char *) NULL)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
*description='\0';
if (image_view->exception->reason != (char *) NULL)
(void) CopyMagickString(description,GetLocaleExceptionMessage(
image_view->exception->severity,image_view->exception->reason),
MaxTextExtent);
if (image_view->exception->description != (char *) NULL)
{
(void) ConcatenateMagickString(description," (",MaxTextExtent);
(void) ConcatenateMagickString(description,GetLocaleExceptionMessage(
image_view->exception->severity,image_view->exception->description),
MaxTextExtent);
(void) ConcatenateMagickString(description,")",MaxTextExtent);
}
return(description);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e V i e w E x t e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageViewExtent() returns the image view extent.
%
% The format of the GetImageViewExtent method is:
%
% RectangleInfo GetImageViewExtent(const ImageView *image_view)
%
% A description of each parameter follows:
%
% o image_view: the image view.
%
*/
MagickExport RectangleInfo GetImageViewExtent(const ImageView *image_view)
{
assert(image_view != (ImageView *) NULL);
assert(image_view->signature == MagickSignature);
return(image_view->extent);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e V i e w I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageViewImage() returns the image associated with the image view.
%
% The format of the GetImageViewImage method is:
%
% Image *GetImageViewImage(const ImageView *image_view)
%
% A description of each parameter follows:
%
% o image_view: the image view.
%
*/
MagickExport Image *GetImageViewImage(const ImageView *image_view)
{
assert(image_view != (ImageView *) NULL);
assert(image_view->signature == MagickSignature);
return(image_view->image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e V i e w I t e r a t o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageViewIterator() iterates over the image view in parallel and calls
% your get method for each scanline of the view. The pixel extent is
% not confined to the image canvas-- that is, you can include negative offsets
% or widths or heights that exceed the image dimensions. Any updates to
% the pixels in your callback are ignored.
%
% The callback signature is:
%
% MagickBooleanType GetImageViewMethod(const ImageView *source,
% const ssize_t y,const int thread_id,void *context)
%
% Use this pragma if the view is not single threaded:
%
% #pragma omp critical
%
% to define a section of code in your callback get method that must be
% executed by a single thread at a time.
%
% The format of the GetImageViewIterator method is:
%
% MagickBooleanType GetImageViewIterator(ImageView *source,
% GetImageViewMethod get,void *context)
%
% A description of each parameter follows:
%
% o source: the source image view.
%
% o get: the get callback method.
%
% o context: the user defined context.
%
*/
MagickExport MagickBooleanType GetImageViewIterator(ImageView *source,
GetImageViewMethod get,void *context)
{
Image
*source_image;
MagickBooleanType
status;
MagickOffsetType
progress;
ssize_t
y;
assert(source != (ImageView *) NULL);
assert(source->signature == MagickSignature);
if (get == (GetImageViewMethod) NULL)
return(MagickFalse);
source_image=source->image;
status=MagickTrue;
progress=0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) num_threads(source->number_threads)
#endif
for (y=source->extent.y; y < (ssize_t) source->extent.height; y++)
{
const int
id = GetOpenMPThreadId();
register const PixelPacket
*pixels;
if (status == MagickFalse)
continue;
pixels=GetCacheViewVirtualPixels(source->view,source->extent.x,y,
source->extent.width,1,source->exception);
if (pixels == (const PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
if (get(source,y,id,context) == MagickFalse)
status=MagickFalse;
if (source_image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_GetImageViewIterator)
#endif
proceed=SetImageProgress(source_image,source->description,progress++,
source->extent.height);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
return(status);
}
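/*
  A hedged sketch (not part of MagickCore, wrapped in #if 0) of a get callback
  for the iterator above: it accumulates the mean of the red channel.
  MeanContext is a hypothetical user type; the OpenMP critical section follows
  the usage suggested in the method comment.
*/
#if 0
typedef struct _MeanContext
{
  MagickRealType
    sum;

  size_t
    count;
} MeanContext;

static MagickBooleanType SumRedChannel(const ImageView *source,
  const ssize_t y,const int thread_id,void *context)
{
  MeanContext
    *mean = (MeanContext *) context;

  const PixelPacket
    *pixels;

  MagickRealType
    row_sum;

  ssize_t
    x;

  pixels=GetImageViewVirtualPixels(source);
  row_sum=0.0;
  for (x=0; x < (ssize_t) GetImageViewExtent(source).width; x++)
    row_sum+=pixels[x].red;
  #pragma omp critical (SumRedChannel)
  {
    mean->sum+=row_sum;
    mean->count+=(size_t) GetImageViewExtent(source).width;
  }
  return(MagickTrue);
}
#endif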
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e V i e w V i r t u a l I n d e x e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageViewVirtualIndexes() returns the image view virtual indexes.
%
% The format of the GetImageViewVirtualIndexes method is:
%
% const IndexPacket *GetImageViewVirtualIndexes(
% const ImageView *image_view)
%
% A description of each parameter follows:
%
% o image_view: the image view.
%
*/
MagickExport const IndexPacket *GetImageViewVirtualIndexes(
const ImageView *image_view)
{
assert(image_view != (ImageView *) NULL);
assert(image_view->signature == MagickSignature);
return(GetCacheViewVirtualIndexQueue(image_view->view));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e V i e w V i r t u a l P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageViewVirtualPixels() returns the image view virtual pixels.
%
% The format of the GetImageViewVirtualPixels method is:
%
% const PixelPacket *GetImageViewVirtualPixels(const ImageView *image_view)
%
% A description of each parameter follows:
%
% o image_view: the image view.
%
*/
MagickExport const PixelPacket *GetImageViewVirtualPixels(
const ImageView *image_view)
{
assert(image_view != (ImageView *) NULL);
assert(image_view->signature == MagickSignature);
return(GetCacheViewVirtualPixelQueue(image_view->view));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s I m a g e V i e w %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsImageView() returns MagickTrue if the parameter is verified as an image
% view object.
%
% The format of the IsImageView method is:
%
% MagickBooleanType IsImageView(const ImageView *image_view)
%
% A description of each parameter follows:
%
% o image_view: the image view.
%
*/
MagickExport MagickBooleanType IsImageView(const ImageView *image_view)
{
if (image_view == (const ImageView *) NULL)
return(MagickFalse);
if (image_view->signature != MagickSignature)
return(MagickFalse);
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% N e w I m a g e V i e w %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% NewImageView() returns an image view required for all other methods in the
% Image View API.
%
% The format of the NewImageView method is:
%
% ImageView *NewImageView(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport ImageView *NewImageView(Image *image)
{
ImageView
*image_view;
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
image_view=(ImageView *) AcquireMagickMemory(sizeof(*image_view));
if (image_view == (ImageView *) NULL)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
(void) ResetMagickMemory(image_view,0,sizeof(*image_view));
image_view->description=ConstantString("ImageView");
image_view->image=image;
image_view->view=AcquireCacheView(image_view->image);
image_view->extent.width=image->columns;
image_view->extent.height=image->rows;
image_view->extent.x=0;
image_view->extent.y=0;
image_view->number_threads=GetOpenMPMaximumThreads();
image_view->exception=AcquireExceptionInfo();
image_view->debug=IsEventLogging();
image_view->signature=MagickSignature;
return(image_view);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% N e w I m a g e V i e w R e g i o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% NewImageViewRegion() returns an image view required for all other methods
% in the Image View API.
%
% The format of the NewImageViewRegion method is:
%
% ImageView *NewImageViewRegion(Image *image,const ssize_t x,
% const ssize_t y,const size_t width,const size_t height)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,width,height: These values define the perimeter of the region of
% pixels to view.
%
*/
MagickExport ImageView *NewImageViewRegion(Image *image,const ssize_t x,
const ssize_t y,const size_t width,const size_t height)
{
ImageView
*image_view;
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
image_view=(ImageView *) AcquireMagickMemory(sizeof(*image_view));
if (image_view == (ImageView *) NULL)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
(void) ResetMagickMemory(image_view,0,sizeof(*image_view));
image_view->description=ConstantString("ImageView");
image_view->image=image;
image_view->view=AcquireCacheView(image_view->image);
image_view->extent.width=width;
image_view->extent.height=height;
image_view->extent.x=x;
image_view->extent.y=y;
image_view->number_threads=GetOpenMPMaximumThreads();
image_view->exception=AcquireExceptionInfo();
image_view->debug=IsEventLogging();
image_view->signature=MagickSignature;
return(image_view);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e V i e w D e s c r i p t i o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageViewDescription() associates a description with an image view.
%
% The format of the SetImageViewDescription method is:
%
% void SetImageViewDescription(ImageView *image_view,
% const char *description)
%
% A description of each parameter follows:
%
% o image_view: the image view.
%
% o description: the image view description.
%
*/
MagickExport void SetImageViewDescription(ImageView *image_view,
const char *description)
{
assert(image_view != (ImageView *) NULL);
assert(image_view->signature == MagickSignature);
image_view->description=ConstantString(description);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e V i e w I t e r a t o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageViewIterator() iterates over the image view in parallel and calls
% your set method for each scanline of the view. The pixel extent is
% confined to the image canvas-- that is, no negative offsets or widths or
% heights that exceed the image dimensions are permitted. The pixels are
% initially undefined, and any settings you make in the callback method are
% automagically synced back to your image.
%
% The callback signature is:
%
% MagickBooleanType SetImageViewMethod(ImageView *destination,
% const ssize_t y,const int thread_id,void *context)
%
% Use this pragma if the view is not single threaded:
%
% #pragma omp critical
%
% to define a section of code in your callback set method that must be
% executed by a single thread at a time.
%
% The format of the SetImageViewIterator method is:
%
% MagickBooleanType SetImageViewIterator(ImageView *destination,
% SetImageViewMethod set,void *context)
%
% A description of each parameter follows:
%
% o destination: the image view.
%
% o set: the set callback method.
%
% o context: the user defined context.
%
*/
MagickExport MagickBooleanType SetImageViewIterator(ImageView *destination,
SetImageViewMethod set,void *context)
{
ExceptionInfo
*exception;
Image
*destination_image;
MagickBooleanType
status;
MagickOffsetType
progress;
ssize_t
y;
assert(destination != (ImageView *) NULL);
assert(destination->signature == MagickSignature);
if (set == (SetImageViewMethod) NULL)
return(MagickFalse);
destination_image=destination->image;
if (SetImageStorageClass(destination_image,DirectClass) == MagickFalse)
return(MagickFalse);
status=MagickTrue;
progress=0;
exception=destination->exception;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) num_threads(destination->number_threads)
#endif
for (y=destination->extent.y; y < (ssize_t) destination->extent.height; y++)
{
const int
id = GetOpenMPThreadId();
MagickBooleanType
sync;
register PixelPacket
*restrict pixels;
if (status == MagickFalse)
continue;
pixels=GetCacheViewAuthenticPixels(destination->view,destination->extent.x,
y,destination->extent.width,1,exception);
if (pixels == (PixelPacket *) NULL)
{
InheritException(destination->exception,GetCacheViewException(
destination->view));
status=MagickFalse;
continue;
}
if (set(destination,y,id,context) == MagickFalse)
status=MagickFalse;
sync=SyncCacheViewAuthenticPixels(destination->view,exception);
if (sync == MagickFalse)
{
InheritException(destination->exception,GetCacheViewException(
destination->view));
status=MagickFalse;
}
if (destination_image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_SetImageViewIterator)
#endif
proceed=SetImageProgress(destination_image,destination->description,
progress++,destination->extent.height);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
return(status);
}
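/*
  A hedged sketch (not part of MagickCore, wrapped in #if 0) of a set callback
  for the iterator above: it fills each scanline with mid-gray; the iterator
  syncs the pixels back to the image after the callback returns.
*/
#if 0
static MagickBooleanType FillWithGray(ImageView *destination,const ssize_t y,
  const int thread_id,void *context)
{
  PixelPacket
    *pixels;

  ssize_t
    x;

  pixels=GetImageViewAuthenticPixels(destination);
  for (x=0; x < (ssize_t) GetImageViewExtent(destination).width; x++)
  {
    pixels[x].red=QuantumRange/2;
    pixels[x].green=QuantumRange/2;
    pixels[x].blue=QuantumRange/2;
  }
  return(MagickTrue);
}
#endif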
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e V i e w T h r e a d s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageViewThreads() sets the number of threads in a thread team.
%
% The format of the SetImageViewThreads method is:
%
% void SetImageViewThreads(ImageView *image_view,
% const size_t number_threads)
%
% A description of each parameter follows:
%
% o image_view: the image view.
%
% o number_threads: the number of threads in a thread team.
%
*/
MagickExport void SetImageViewThreads(ImageView *image_view,
const size_t number_threads)
{
assert(image_view != (ImageView *) NULL);
assert(image_view->signature == MagickSignature);
image_view->number_threads=number_threads;
if (number_threads > GetOpenMPMaximumThreads())
image_view->number_threads=GetOpenMPMaximumThreads();
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T r a n s f e r I m a g e V i e w I t e r a t o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TransferImageViewIterator() iterates over two image views in parallel and
% calls your transfer method for each scanline of the view. The source pixel
% extent is not confined to the image canvas-- that is, you can include
% negative offsets, or widths or heights that exceed the image dimensions.
% However, the destination image view is confined to the image canvas-- that
% is, no negative offsets or widths or heights that exceed the image
% dimensions are permitted.
%
% The callback signature is:
%
% MagickBooleanType TransferImageViewMethod(const ImageView *source,
% ImageView *destination,const ssize_t y,const int thread_id,
% void *context)
%
% Use this pragma if the view is not single threaded:
%
% #pragma omp critical
%
% to define a section of code in your callback transfer method that must be
% executed by a single thread at a time.
%
% The format of the TransferImageViewIterator method is:
%
% MagickBooleanType TransferImageViewIterator(ImageView *source,
% ImageView *destination,TransferImageViewMethod transfer,void *context)
%
% A description of each parameter follows:
%
% o source: the source image view.
%
% o destination: the destination image view.
%
% o transfer: the transfer callback method.
%
% o context: the user defined context.
%
*/
MagickExport MagickBooleanType TransferImageViewIterator(ImageView *source,
ImageView *destination,TransferImageViewMethod transfer,void *context)
{
ExceptionInfo
*exception;
Image
*destination_image,
*source_image;
MagickBooleanType
status;
MagickOffsetType
progress;
ssize_t
y;
assert(source != (ImageView *) NULL);
assert(source->signature == MagickSignature);
if (transfer == (TransferImageViewMethod) NULL)
return(MagickFalse);
source_image=source->image;
destination_image=destination->image;
if (SetImageStorageClass(destination_image,DirectClass) == MagickFalse)
return(MagickFalse);
status=MagickTrue;
progress=0;
exception=destination->exception;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) num_threads(source->number_threads)
#endif
for (y=source->extent.y; y < (ssize_t) source->extent.height; y++)
{
const int
id = GetOpenMPThreadId();
MagickBooleanType
sync;
register const PixelPacket
*restrict pixels;
register PixelPacket
*restrict destination_pixels;
if (status == MagickFalse)
continue;
pixels=GetCacheViewVirtualPixels(source->view,source->extent.x,y,
source->extent.width,1,source->exception);
if (pixels == (const PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
destination_pixels=GetCacheViewAuthenticPixels(destination->view,
destination->extent.x,y,destination->extent.width,1,exception);
if (destination_pixels == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
if (transfer(source,destination,y,id,context) == MagickFalse)
status=MagickFalse;
sync=SyncCacheViewAuthenticPixels(destination->view,exception);
if (sync == MagickFalse)
{
InheritException(destination->exception,GetCacheViewException(
source->view));
status=MagickFalse;
}
if (source_image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_TransferImageViewIterator)
#endif
proceed=SetImageProgress(source_image,source->description,progress++,
source->extent.height);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% U p d a t e I m a g e V i e w I t e r a t o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% UpdateImageViewIterator() iterates over the image view in parallel and calls
% your update method for each scanline of the view. The pixel extent is
% confined to the image canvas-- that is, no negative offsets or widths or
% heights that exceed the image dimensions are permitted. Updates to pixels
% in your callback are automagically synced back to the image.
%
% The callback signature is:
%
% MagickBooleanType UpdateImageViewMethod(ImageView *source,
% const ssize_t y,const int thread_id,void *context)
%
% Use this pragma if the view is not single threaded:
%
% #pragma omp critical
%
% to define a section of code in your callback update method that must be
% executed by a single thread at a time.
%
% The format of the UpdateImageViewIterator method is:
%
% MagickBooleanType UpdateImageViewIterator(ImageView *source,
% UpdateImageViewMethod update,void *context)
%
% A description of each parameter follows:
%
% o source: the source image view.
%
% o update: the update callback method.
%
% o context: the user defined context.
%
*/
MagickExport MagickBooleanType UpdateImageViewIterator(ImageView *source,
UpdateImageViewMethod update,void *context)
{
ExceptionInfo
*exception;
Image
*source_image;
MagickBooleanType
status;
MagickOffsetType
progress;
ssize_t
y;
assert(source != (ImageView *) NULL);
assert(source->signature == MagickSignature);
if (update == (UpdateImageViewMethod) NULL)
return(MagickFalse);
source_image=source->image;
if (SetImageStorageClass(source_image,DirectClass) == MagickFalse)
return(MagickFalse);
status=MagickTrue;
progress=0;
exception=source->exception;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) num_threads(source->number_threads)
#endif
for (y=source->extent.y; y < (ssize_t) source->extent.height; y++)
{
const int
id = GetOpenMPThreadId();
register PixelPacket
*restrict pixels;
if (status == MagickFalse)
continue;
pixels=GetCacheViewAuthenticPixels(source->view,source->extent.x,y,
source->extent.width,1,exception);
if (pixels == (PixelPacket *) NULL)
{
InheritException(source->exception,GetCacheViewException(source->view));
status=MagickFalse;
continue;
}
if (update(source,y,id,context) == MagickFalse)
status=MagickFalse;
if (SyncCacheViewAuthenticPixels(source->view,exception) == MagickFalse)
{
InheritException(source->exception,GetCacheViewException(source->view));
status=MagickFalse;
}
if (source_image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_UpdateImageViewIterator)
#endif
proceed=SetImageProgress(source_image,source->description,progress++,
source->extent.height);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
return(status);
}
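/*
  A hedged sketch (not part of MagickCore, wrapped in #if 0) of an update
  callback for the iterator above: it inverts each scanline in place, and the
  iterator syncs the modified pixels back to the image.
*/
#if 0
static MagickBooleanType InvertScanline(ImageView *source,const ssize_t y,
  const int thread_id,void *context)
{
  PixelPacket
    *pixels;

  ssize_t
    x;

  pixels=GetImageViewAuthenticPixels(source);
  for (x=0; x < (ssize_t) GetImageViewExtent(source).width; x++)
  {
    pixels[x].red=QuantumRange-pixels[x].red;
    pixels[x].green=QuantumRange-pixels[x].green;
    pixels[x].blue=QuantumRange-pixels[x].blue;
  }
  return(MagickTrue);
}
#endif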
|
GB_critical_section.c | //------------------------------------------------------------------------------
// Source/Template/GB_critical_section: execute code in a critical section
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// All access to the global matrix queue, via GB_queue_* operations, must
// be done through a critical section. No other part of SuiteSparse:GraphBLAS
// uses this critical section; it is only used for accessing the global matrix
// queue via GB_queue_*. All GB_queue_* operations use the GB_CRITICAL macro
// to check the result, and if the critical section fails (ok == false),
// they return GrB_PANIC.
// Critical sections for Windows threads and ANSI C11 threads are listed below
// as drafts, but these threading models are not yet supported.
{
//--------------------------------------------------------------------------
// POSIX pthreads
//--------------------------------------------------------------------------
#if defined (USER_POSIX_THREADS)
{
ok = (pthread_mutex_lock (&GB_sync) == 0) ;
GB_CRITICAL_SECTION ;
ok = ok && (pthread_mutex_unlock (&GB_sync) == 0) ;
}
//--------------------------------------------------------------------------
// Microsoft Windows
//--------------------------------------------------------------------------
#elif defined (USER_WINDOWS_THREADS)
{
// This should work, per the Windows spec, but is not yet supported.
EnterCriticalSection (&GB_sync) ;
GB_CRITICAL_SECTION ;
LeaveCriticalSection (&GB_sync) ;
}
//--------------------------------------------------------------------------
// ANSI C11 threads
//--------------------------------------------------------------------------
#elif defined (USER_ANSI_THREADS)
{
// This should work per the ANSI C11 Spec, but is not yet supported.
ok = (mtx_lock (&GB_sync) == thrd_success) ;
GB_CRITICAL_SECTION ;
ok = ok && (mtx_unlock (&GB_sync) == thrd_success) ;
}
//--------------------------------------------------------------------------
// OpenMP
//--------------------------------------------------------------------------
#else // USER_OPENMP_THREADS or USER_NO_THREADS
{
// default: use a named OpenMP critical section. If OpenMP is not
// available, then the #pragma is ignored and this becomes vanilla,
// single-threaded code.
#pragma omp critical(GB_critical_section)
GB_CRITICAL_SECTION ;
}
#endif
}
#undef GB_CRITICAL_SECTION
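// A minimal sketch (not part of the library, wrapped in #if 0) of how this
// template is consumed: the caller defines GB_CRITICAL_SECTION to the
// statement(s) that must run atomically and then #include's this file. The
// guarded statement below is hypothetical, standing in for a real GB_queue_*
// operation.
#if 0
bool ok = true ;
#define GB_CRITICAL_SECTION                             \
{                                                       \
    do_protected_queue_update ( ) ;  /* hypothetical */ \
}
#include "GB_critical_section.c"
if (!ok) return (GrB_PANIC) ;   // as the GB_queue_* wrappers do via GB_CRITICAL
#endif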
|
dem_structures_coupling_utilities.h | /*
* Author: Miguel Angel Celigueta
*
* maceli@cimne.upc.edu
*/
#ifndef KRATOS_STRUCTURES_DEM_COUPLING_UTILITIES_H
#define KRATOS_STRUCTURES_DEM_COUPLING_UTILITIES_H
// /* External includes */
// System includes
// Project includes
#include "includes/variables.h"
/* System includes */
#include <limits>
#include <iostream>
#include <iomanip>
/* External includes */
#ifdef _OPENMP
#include <omp.h>
#endif
/* Project includes */
#include "includes/define.h"
#include "includes/model_part.h"
#include "custom_conditions/RigidFace.h"
#include "DEM_application_variables.h"
#include "dem_structures_coupling_application_variables.h"
#include "custom_elements/spheric_continuum_particle.h"
namespace Kratos
{
class DemStructuresCouplingUtilities
{
public:
typedef ModelPart::NodesContainerType::ContainerType::iterator NodesIteratorType;
KRATOS_CLASS_POINTER_DEFINITION(DemStructuresCouplingUtilities);
/// Default constructor.
DemStructuresCouplingUtilities(){}
/// Destructor.
virtual ~DemStructuresCouplingUtilities(){}
//***************************************************************************************************************
//***************************************************************************************************************
void TransferStructuresSkinToDem(ModelPart& r_source_model_part, ModelPart& r_destination_model_part, Properties::Pointer props) {
std::string error = CheckProvidedProperties(props);
if (error != "all_ok") KRATOS_ERROR << "The Dem Walls ModelPart has no valid Properties. Missing " << error << " . Exiting." << std::endl;
r_destination_model_part.Conditions().Sort();
int id = 1;
if (r_destination_model_part.Conditions().size()) id = (r_destination_model_part.ConditionsEnd()-1)->Id() + 1;
ModelPart::ConditionsContainerType& source_conditions = r_source_model_part.Conditions();
// Adding conditions
for (unsigned int i = 0; i < source_conditions.size(); i++) {
ModelPart::ConditionsContainerType::iterator it = r_source_model_part.ConditionsBegin() + i;
Geometry< Node<3> >::Pointer p_geometry = it->pGetGeometry();
Condition::Pointer cond = Condition::Pointer(new RigidFace3D(id, p_geometry, props));
cond->Set(DEMFlags::STICKY, true);
r_destination_model_part.AddCondition(cond); //TODO: add them all in a single call (AddConditions), using a temporary PointerVector as a list (not a std::vector).
id++;
}
// Adding nodes
r_destination_model_part.AddNodes(r_source_model_part.NodesBegin(), r_source_model_part.NodesEnd());
}
std::string CheckProvidedProperties(Properties::Pointer props) {
std::vector<Variable<double> > list_of_variables_double_to_check = {FRICTION, WALL_COHESION, SEVERITY_OF_WEAR, IMPACT_WEAR_SEVERITY, BRINELL_HARDNESS, YOUNG_MODULUS, POISSON_RATIO};
std::vector<Variable<bool> > list_of_variables_bool_to_check = {COMPUTE_WEAR};
for (int i=0; i<(int)list_of_variables_double_to_check.size(); i++) {
if(!props->Has(list_of_variables_double_to_check[i])) return list_of_variables_double_to_check[i].Name();
}
for (int i=0; i<(int)list_of_variables_bool_to_check.size(); i++) {
if(!props->Has(list_of_variables_bool_to_check[i])) return list_of_variables_bool_to_check[i].Name();
}
return "all_ok";
}
void SmoothLoadTrasferredToFem(ModelPart& r_model_part, const double portion_of_the_force_which_is_new) {
#pragma omp parallel for
for (int i=0; i<(int)r_model_part.Nodes().size(); i++) {
auto node_it = r_model_part.NodesBegin() + i;
array_1d<double, 3> averaged_force;
array_1d<double, 3>& node_dem_load = node_it->FastGetSolutionStepValue(DEM_SURFACE_LOAD);
noalias(averaged_force) = portion_of_the_force_which_is_new * node_dem_load + (1.0 - portion_of_the_force_which_is_new) * node_it->FastGetSolutionStepValue(DEM_SURFACE_LOAD, 1);
noalias(node_dem_load) = averaged_force;
}
}
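// The blend above is the convex combination
//   smoothed_load = p * new_load + (1 - p) * previous_load, with p in [0, 1];
// e.g. for p = 0.25, a new nodal load of 100 blended with a previous load of
// 60 gives 0.25*100 + 0.75*60 = 70, damping abrupt jumps in the DEM load
// transferred to the FEM skin.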
void ComputeSandProduction(ModelPart& dem_model_part, ModelPart& outer_walls_model_part, const double time) {
const std::string filename = "sand_production_graph.txt";
std::ifstream ifile(filename.c_str());
static bool first_time_entered = true;
if ((bool) ifile && first_time_entered) {
std::remove("sand_production_graph.txt");
first_time_entered = false;
}
ModelPart::ElementsContainerType& pElements = dem_model_part.GetCommunicator().LocalMesh().Elements();
double current_total_mass_in_grams = 0.0;
for (unsigned int k = 0; k < pElements.size(); k++) {
ModelPart::ElementsContainerType::iterator it = pElements.ptr_begin() + k;
Element* raw_p_element = &(*it);
SphericParticle* p_sphere = dynamic_cast<SphericParticle*>(raw_p_element);
if (p_sphere->Is(ISOLATED)) continue;
const double particle_radius = p_sphere->GetRadius();
const double particle_density = p_sphere->GetDensity();
current_total_mass_in_grams += (4.0/3.0) * Globals::Pi * particle_density * particle_radius * particle_radius * particle_radius * 1000.0;
}
static const double initial_total_mass_in_grams = current_total_mass_in_grams;
const double cumulative_sand_mass_in_grams = initial_total_mass_in_grams - current_total_mass_in_grams;
ModelPart::ConditionsContainerType::iterator condition_begin = outer_walls_model_part.ConditionsBegin();
const double face_pressure_in_psi = condition_begin->GetValue(POSITIVE_FACE_PRESSURE) * 0.000145;
static std::ofstream sand_prod_file("sand_production_graph.txt", std::ios_base::out | std::ios_base::app);
sand_prod_file << time << " " << face_pressure_in_psi << " " << cumulative_sand_mass_in_grams << '\n';
sand_prod_file.flush();
}
void MarkBrokenSpheres(ModelPart& dem_model_part) {
ModelPart::ElementsContainerType& pElements = dem_model_part.GetCommunicator().LocalMesh().Elements();
for (unsigned int k = 0; k < pElements.size(); k++) {
ModelPart::ElementsContainerType::iterator it = pElements.ptr_begin() + k;
Element* raw_p_element = &(*it);
SphericContinuumParticle* p_sphere = dynamic_cast<SphericContinuumParticle*>(raw_p_element);
if (p_sphere->Is(ISOLATED)) continue;
bool go_to_next_particle = false;
for (unsigned int i = 0; i < p_sphere->mContinuumInitialNeighborsSize; i++) {
if (!p_sphere->mIniNeighbourFailureId[i]) {
go_to_next_particle = true;
break;
}
}
if (go_to_next_particle) continue;
else p_sphere->Set(ISOLATED, true);
}
}
void ComputeSandProductionWithDepthFirstSearch(ModelPart& dem_model_part, ModelPart& outer_walls_model_part, const double time) {
const std::string filename = "sand_production_graph_with_chunks.txt";
std::ifstream ifile(filename.c_str());
static bool first_time_entered = true;
if ((bool) ifile && first_time_entered) {
std::remove("sand_production_graph_with_chunks.txt");
first_time_entered = false;
}
ModelPart::ElementsContainerType& pElements = dem_model_part.GetCommunicator().LocalMesh().Elements();
std::vector<double> chunks_masses;
for (unsigned int k = 0; k < pElements.size(); k++) {
ModelPart::ElementsContainerType::iterator it = pElements.ptr_begin() + k;
it->Set(VISITED, false);
}
for (unsigned int k = 0; k < pElements.size(); k++) {
ModelPart::ElementsContainerType::iterator it = pElements.ptr_begin() + k;
Element* raw_p_element = &(*it);
SphericContinuumParticle* p_sphere = dynamic_cast<SphericContinuumParticle*>(raw_p_element);
double this_chunk_mass = 0.0;
if( it->IsNot(VISITED) ) {
DepthFirstSearchVisit(p_sphere, this_chunk_mass);
chunks_masses.push_back(this_chunk_mass);
}
}
const double max_mass_of_a_single_chunck = *std::max_element(chunks_masses.begin(), chunks_masses.end());
const double current_total_mass_in_grams = max_mass_of_a_single_chunck;
static const double initial_total_mass_in_grams = current_total_mass_in_grams;
const double cumulative_sand_mass_in_grams = initial_total_mass_in_grams - current_total_mass_in_grams;
ModelPart::ConditionsContainerType::iterator condition_begin = outer_walls_model_part.ConditionsBegin();
const double face_pressure_in_psi = condition_begin->GetValue(POSITIVE_FACE_PRESSURE) * 0.000145;
static std::ofstream sand_prod_file("sand_production_graph_with_chunks.txt", std::ios_base::out | std::ios_base::app);
sand_prod_file << time << " " << face_pressure_in_psi << " " << cumulative_sand_mass_in_grams << '\n';
sand_prod_file.flush();
}
void DepthFirstSearchVisit(SphericContinuumParticle* p_sphere, double& this_chunk_mass) {
p_sphere->Set(VISITED, true);
const double particle_radius = p_sphere->GetRadius();
const double particle_density = p_sphere->GetDensity();
this_chunk_mass += (4.0/3.0) * Globals::Pi * particle_density * particle_radius * particle_radius * particle_radius * 1000.0;
for (size_t i=0; i<p_sphere->mContinuumInitialNeighborsSize; i++) {
SphericParticle* p_neighbour_sphere = p_sphere->mNeighbourElements[i];
if (p_neighbour_sphere==NULL) continue;
if (p_sphere->mIniNeighbourFailureId[i]) continue;
if (p_neighbour_sphere->IsNot(VISITED)) {
SphericContinuumParticle* p_neigh_cont_sphere = dynamic_cast<SphericContinuumParticle*>(p_neighbour_sphere);
DepthFirstSearchVisit(p_neigh_cont_sphere, this_chunk_mass);
}
}
}
void ComputeTriaxialSandProduction(ModelPart& dem_model_part, ModelPart& outer_walls_model_part_1, ModelPart& outer_walls_model_part_2, const double time) {
const std::string filename = "sand_production_graph.txt";
std::ifstream ifile(filename.c_str());
static bool first_time_entered = true;
if ((bool) ifile && first_time_entered) {
std::remove("sand_production_graph.txt");
first_time_entered = false;
}
ModelPart::ElementsContainerType& pElements = dem_model_part.GetCommunicator().LocalMesh().Elements();
double current_total_mass_in_grams = 0.0;
for (unsigned int k = 0; k < pElements.size(); k++) {
ModelPart::ElementsContainerType::iterator it = pElements.ptr_begin() + k;
Element* raw_p_element = &(*it);
SphericParticle* p_sphere = dynamic_cast<SphericParticle*>(raw_p_element);
if (p_sphere->Is(ISOLATED)) continue;
const double particle_radius = p_sphere->GetRadius();
const double particle_density = p_sphere->GetDensity();
current_total_mass_in_grams += (4.0/3.0) * Globals::Pi * particle_density * particle_radius * particle_radius * particle_radius * 1000.0;
}
static const double initial_total_mass_in_grams = current_total_mass_in_grams;
const double cumulative_sand_mass_in_grams = initial_total_mass_in_grams - current_total_mass_in_grams;
ModelPart::ConditionsContainerType::iterator condition_begin_1 = outer_walls_model_part_1.ConditionsBegin();
ModelPart::ConditionsContainerType::iterator condition_begin_2 = outer_walls_model_part_2.ConditionsBegin();
const double face_pressure_in_psi = (condition_begin_1->GetValue(POSITIVE_FACE_PRESSURE) +
condition_begin_2->GetValue(POSITIVE_FACE_PRESSURE) +
3.45e6) * 0.000145 * 0.33333333333333; // 3.45e6 is the sigma_z constant pressure
static std::ofstream sand_prod_file("sand_production_graph.txt", std::ios_base::out | std::ios_base::app);
sand_prod_file << time << " " << face_pressure_in_psi << " " << cumulative_sand_mass_in_grams << '\n';
sand_prod_file.flush();
}
//***************************************************************************************************************
//***************************************************************************************************************
///@}
///@name Inquiry
///@{
///@}
///@name Input and output
///@{
/// Turn back information as a string.
virtual std::string Info() const
{
return "";
}
/// Print information about this object.
virtual void PrintInfo(std::ostream& rOStream) const
{
}
/// Print object's data.
virtual void PrintData(std::ostream& rOStream) const
{
}
///@}
///@name Friends
///@{
///@}
protected:
///@name Protected static Member Variables
///@{
///@}
///@name Protected member Variables
///@{
///@}
///@name Protected Operators
///@{
///@}
///@name Protected Operations
///@{
///@}
///@name Protected Access
///@{
///@}
///@name Protected Inquiry
///@{
///@}
///@name Protected LifeCycle
///@{
///@}
private:
///@name Static Member Variables
///@{
///@}
///@name Member Variables
///@{
///@}
///@name Private Operators
///@{
///@}
///@name Private Operations
///@{
///@}
///@name Private Access
///@{
///@}
///@name Private Inquiry
///@{
///@}
///@name Un accessible methods
///@{
/// Assignment operator.
DemStructuresCouplingUtilities & operator=(DemStructuresCouplingUtilities const& rOther);
///@}
}; // Class DemStructuresCouplingUtilities
} // namespace Kratos
#endif // KRATOS_STRUCTURES_DEM_COUPLING_UTILITIES_H
|
Clustering.h | //
// Copyright (C) 2015-2020 Yahoo Japan Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#pragma once
#include "NGT/Index.h"
using namespace std;
#if defined(NGT_AVX_DISABLED)
#define NGT_CLUSTER_NO_AVX
#else
#if defined(__AVX2__)
#define NGT_CLUSTER_AVX2
#else
#define NGT_CLUSTER_NO_AVX
#endif
#endif
#if defined(NGT_CLUSTER_NO_AVX)
#warning "*** SIMD is *NOT* available! ***"
#else
#include <immintrin.h>
#endif
#include <omp.h>
#include <random>
namespace NGT {
class Clustering {
public:
enum InitializationMode {
InitializationModeHead = 0,
InitializationModeRandom = 1,
InitializationModeKmeansPlusPlus = 2
};
enum ClusteringType {
ClusteringTypeKmeansWithNGT = 0,
ClusteringTypeKmeansWithoutNGT = 1,
ClusteringTypeKmeansWithIteration = 2,
ClusteringTypeKmeansWithNGTForCentroids = 3
};
class Entry {
public:
Entry():vectorID(0), centroidID(0), distance(0.0) {}
Entry(size_t vid, size_t cid, double d):vectorID(vid), centroidID(cid), distance(d) {}
bool operator<(const Entry &e) const {return distance > e.distance;}
uint32_t vectorID;
uint32_t centroidID;
double distance;
};
class DescendingEntry {
public:
DescendingEntry(size_t vid, double d):vectorID(vid), distance(d) {}
bool operator<(const DescendingEntry &e) const {return distance < e.distance;}
size_t vectorID;
double distance;
};
class Cluster {
public:
Cluster(std::vector<float> &c):centroid(c), radius(0.0) {}
Cluster(const Cluster &c) { *this = c; }
Cluster &operator=(const Cluster &c) {
members = c.members;
centroid = c.centroid;
radius = c.radius;
return *this;
}
std::vector<Entry> members;
std::vector<float> centroid;
double radius;
};
Clustering(InitializationMode im = InitializationModeHead, ClusteringType ct = ClusteringTypeKmeansWithNGT, size_t mi = 100):
clusteringType(ct), initializationMode(im), maximumIteration(mi) { initialize(); }
void initialize() {
epsilonFrom = 0.12;
epsilonTo = epsilonFrom;
epsilonStep = 0.04;
resultSizeCoefficient = 5;
}
static void
convert(std::vector<std::string> &strings, std::vector<float> &vector) {
vector.clear();
for (auto it = strings.begin(); it != strings.end(); ++it) {
vector.push_back(stod(*it));
}
}
static void
extractVector(const std::string &str, std::vector<float> &vec)
{
std::vector<std::string> tokens;
NGT::Common::tokenize(str, tokens, " \t");
convert(tokens, vec);
}
static void
loadVectors(const std::string &file, std::vector<std::vector<float> > &vectors)
{
std::ifstream is(file);
if (!is) {
throw std::runtime_error("loadVectors::Cannot open " + file );
}
std::string line;
while (getline(is, line)) {
std::vector<float> v;
extractVector(line, v);
vectors.push_back(v);
}
}
static void
saveVectors(const std::string &file, std::vector<std::vector<float> > &vectors)
{
std::ofstream os(file);
for (auto vit = vectors.begin(); vit != vectors.end(); ++vit) {
std::vector<float> &v = *vit;
for (auto it = v.begin(); it != v.end(); ++it) {
os << std::setprecision(9) << (*it);
if (it + 1 != v.end()) {
os << "\t";
}
}
os << std::endl;
}
}
static void
saveVector(const std::string &file, std::vector<size_t> &vectors)
{
std::ofstream os(file);
for (auto vit = vectors.begin(); vit != vectors.end(); ++vit) {
os << *vit << std::endl;
}
}
static void
loadClusters(const std::string &file, std::vector<Cluster> &clusters, size_t numberOfClusters = 0)
{
std::ifstream is(file);
if (!is) {
throw std::runtime_error("loadClusters::Cannot open " + file);
}
std::string line;
while (getline(is, line)) {
std::vector<float> v;
extractVector(line, v);
clusters.push_back(v);
if ((numberOfClusters != 0) && (clusters.size() >= numberOfClusters)) {
break;
}
}
if ((numberOfClusters != 0) && (clusters.size() < numberOfClusters)) {
std::cerr << "Not enough initial cluster data. " << clusters.size() << ":" << numberOfClusters << std::endl;
exit(1);
}
}
#if !defined(NGT_CLUSTER_NO_AVX)
static double
sumOfSquares(float *a, float *b, size_t size) {
__m256 sum = _mm256_setzero_ps();
float *last = a + size;
float *lastgroup = last - 7;
while (a < lastgroup) {
__m256 v = _mm256_sub_ps(_mm256_loadu_ps(a), _mm256_loadu_ps(b));
sum = _mm256_add_ps(sum, _mm256_mul_ps(v, v));
a += 8;
b += 8;
}
__attribute__((aligned(32))) float f[8];
_mm256_store_ps(f, sum);
double s = f[0] + f[1] + f[2] + f[3] + f[4] + f[5] + f[6] + f[7];
while (a < last) {
double d = *a++ - *b++;
s += d * d;
}
return s;
}
#else // !defined(NGT_AVX_DISABLED) && defined(__AVX__)
static double
sumOfSquares(float *a, float *b, size_t size) {
double csum = 0.0;
float *x = a;
float *y = b;
for (size_t i = 0; i < size; i++) {
double d = (double)*x++ - (double)*y++;
csum += d * d;
}
return csum;
}
#endif // !defined(NGT_AVX_DISABLED) && defined(__AVX__)
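// Both sumOfSquares variants above compute the same quantity, the sum over i
// of (a[i] - b[i])^2. The AVX2 path processes 8 floats per iteration and
// handles the trailing (size % 8) elements with a scalar loop, so the two
// paths can differ only by floating-point accumulation order.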
static double
distanceL2(std::vector<float> &vector1, std::vector<float> &vector2) {
return sqrt(sumOfSquares(&vector1[0], &vector2[0], vector1.size()));
}
static double
distanceL2(std::vector<std::vector<float> > &vector1, std::vector<std::vector<float> > &vector2) {
assert(vector1.size() == vector2.size());
double distance = 0.0;
for (size_t i = 0; i < vector1.size(); i++) {
distance += distanceL2(vector1[i], vector2[i]);
}
distance /= (double)vector1.size();
return distance;
}
static double
meanSumOfSquares(std::vector<float> &vector1, std::vector<float> &vector2) {
return sumOfSquares(&vector1[0], &vector2[0], vector1.size()) / (double)vector1.size();
}
static void
subtract(std::vector<float> &a, std::vector<float> &b) {
assert(a.size() == b.size());
auto bit = b.begin();
for (auto ait = a.begin(); ait != a.end(); ++ait, ++bit) {
*ait = *ait - *bit;
}
}
static void
getInitialCentroidsFromHead(std::vector<std::vector<float> > &vectors, std::vector<Cluster> &clusters, size_t size)
{
size = size > vectors.size() ? vectors.size() : size;
clusters.clear();
for (size_t i = 0; i < size; i++) {
clusters.push_back(Cluster(vectors[i]));
}
}
static void
getInitialCentroidsRandomly(std::vector<std::vector<float> > &vectors, std::vector<Cluster> &clusters, size_t size, size_t seed)
{
clusters.clear();
std::random_device rnd;
if (seed == 0) {
seed = rnd();
}
std::mt19937 mt(seed);
for (size_t i = 0; i < size; i++) {
size_t idx = (size_t)((long long)mt() * (long long)vectors.size() / (long long)mt.max());
if (idx >= vectors.size()) { // retry when the draw falls outside the vector range, so every vector can be selected
i--;
continue;
}
clusters.push_back(Cluster(vectors[idx]));
}
assert(clusters.size() == size);
}
static void
getInitialCentroidsKmeansPlusPlus(std::vector<std::vector<float> > &vectors, std::vector<Cluster> &clusters, size_t size)
{
size = size > vectors.size() ? vectors.size() : size;
clusters.clear();
std::random_device rnd;
std::mt19937 mt(rnd());
size_t idx = (long long)mt() * (long long)vectors.size() / (long long)mt.max();
clusters.push_back(Cluster(vectors[idx]));
NGT::Timer timer;
for (size_t k = 1; k < size; k++) {
double sum = 0;
std::priority_queue<DescendingEntry> sortedObjects;
// get d^2 and sort
#pragma omp parallel for
for (size_t vi = 0; vi < vectors.size(); vi++) {
auto vit = vectors.begin() + vi;
double mind = DBL_MAX;
for (auto cit = clusters.begin(); cit != clusters.end(); ++cit) {
double d = distanceL2(*vit, (*cit).centroid);
d *= d;
if (d < mind) {
mind = d;
}
}
#pragma omp critical
{
sortedObjects.push(DescendingEntry(distance(vectors.begin(), vit), mind));
sum += mind;
}
}
double l = (double)mt() / (double)mt.max() * sum;
while (!sortedObjects.empty()) {
sum -= sortedObjects.top().distance;
if (l >= sum) {
clusters.push_back(Cluster(vectors[sortedObjects.top().vectorID]));
break;
}
sortedObjects.pop();
}
}
}
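// The initializer above is standard k-means++ seeding: after the first random
// centroid, each subsequent centroid is drawn with probability proportional
// to D(x)^2, the squared distance from x to its nearest already-chosen
// centroid, implemented here by sampling a threshold l in [0, sum) and
// walking the distance-sorted heap until the running sum drops below l.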
static void
assign(std::vector<std::vector<float> > &vectors, std::vector<Cluster> &clusters,
size_t clusterSize = std::numeric_limits<size_t>::max()) {
// compute distances to the nearest clusters, and construct heap by the distances.
NGT::Timer timer;
timer.start();
std::vector<Entry> sortedObjects(vectors.size());
#pragma omp parallel for
for (size_t vi = 0; vi < vectors.size(); vi++) {
auto vit = vectors.begin() + vi;
{
double mind = DBL_MAX;
size_t mincidx = -1;
for (auto cit = clusters.begin(); cit != clusters.end(); ++cit) {
double d = distanceL2(*vit, (*cit).centroid);
if (d < mind) {
mind = d;
mincidx = distance(clusters.begin(), cit);
}
}
sortedObjects[vi] = Entry(vi, mincidx, mind);
}
}
std::sort(sortedObjects.begin(), sortedObjects.end());
// clear
for (auto cit = clusters.begin(); cit != clusters.end(); ++cit) {
(*cit).members.clear();
}
// distribute objects to the nearest clusters in the same size constraint.
for (auto soi = sortedObjects.rbegin(); soi != sortedObjects.rend();) {
Entry &entry = *soi;
if (entry.centroidID >= clusters.size()) {
std::cerr << "Something wrong. " << entry.centroidID << ":" << clusters.size() << std::endl;
soi++;
continue;
}
if (clusters[entry.centroidID].members.size() < clusterSize) {
clusters[entry.centroidID].members.push_back(entry);
soi++;
} else {
double mind = DBL_MAX;
size_t mincidx = -1;
for (auto cit = clusters.begin(); cit != clusters.end(); ++cit) {
if ((*cit).members.size() >= clusterSize) {
continue;
}
double d = distanceL2(vectors[entry.vectorID], (*cit).centroid);
if (d < mind) {
mind = d;
mincidx = distance(clusters.begin(), cit);
}
}
entry = Entry(entry.vectorID, mincidx, mind);
int pt = distance(sortedObjects.rbegin(), soi);
std::sort(sortedObjects.begin(), soi.base());
soi = sortedObjects.rbegin() + pt;
assert(pt == distance(sortedObjects.rbegin(), soi));
}
}
moveFartherObjectsToEmptyClusters(clusters);
}
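// assign() above is a size-constrained k-means assignment: objects are
// matched to their nearest centroid in descending-distance order; once a
// cluster reaches clusterSize, the overflowing object is re-scored against
// the remaining non-full clusters, and empty clusters are finally seeded
// with the farthest members of other clusters.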
static void moveFartherObjectsToEmptyClusters(std::vector<Cluster> &clusters) {
size_t emptyClusterCount = 0;
for (auto cit = clusters.begin(); cit != clusters.end(); ++cit) {
if ((*cit).members.size() == 0) {
emptyClusterCount++;
double max = 0.0;
auto maxit = clusters.begin();
for (auto scit = clusters.begin(); scit != clusters.end(); ++scit) {
if ((*scit).members.size() >= 2 && (*scit).members.back().distance > max) {
maxit = scit;
max = (*scit).members.back().distance;
}
}
(*cit).members.push_back((*maxit).members.back());
(*cit).members.back().centroidID = distance(clusters.begin(), cit);
(*maxit).members.pop_back();
}
}
emptyClusterCount = 0;
for (auto cit = clusters.begin(); cit != clusters.end(); ++cit) {
if ((*cit).members.size() == 0) {
emptyClusterCount++;
}
}
}
static void
assignWithNGT(NGT::Index &index, std::vector<std::vector<float> > &vectors, std::vector<Cluster> &clusters,
float &radius, size_t &resultSize, float epsilon = 0.12, size_t notRetrievedObjectCount = 0) {
size_t dataSize = vectors.size();
assert(index.getObjectRepositorySize() - 1 == vectors.size());
vector<vector<Entry> > results(clusters.size());
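// For each centroid, run an approximate NGT search to collect candidate
// members; objects missed by every search are assigned exactly further below.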
#pragma omp parallel for
for (size_t ci = 0; ci < clusters.size(); ci++) {
auto cit = clusters.begin() + ci;
NGT::ObjectDistances objects; // result set
NGT::Object *query = 0;
query = index.allocateObject((*cit).centroid);
// set search parameters.
NGT::SearchContainer sc(*query); // search parameter container.
sc.setResults(&objects); // set the result set.
sc.setEpsilon(epsilon); // set exploration coefficient.
if (radius > 0.0) {
sc.setRadius(radius);
sc.setSize(dataSize / 2);
} else {
sc.setSize(resultSize); // the number of resultant objects.
}
index.search(sc);
results[ci].reserve(objects.size());
for (size_t idx = 0; idx < objects.size(); idx++) {
size_t oidx = objects[idx].id - 1;
results[ci].push_back(Entry(oidx, ci, objects[idx].distance));
}
index.deleteObject(query);
}
size_t resultCount = 0;
for (auto ri = results.begin(); ri != results.end(); ++ri) {
resultCount += (*ri).size();
}
vector<Entry> sortedResults;
sortedResults.reserve(resultCount);
for (auto ri = results.begin(); ri != results.end(); ++ri) {
std::copy((*ri).begin(), (*ri).end(), std::back_inserter(sortedResults));
}
vector<bool> processedObjects(dataSize, false);
for (auto i = sortedResults.begin(); i != sortedResults.end(); ++i) {
processedObjects[(*i).vectorID] = true;
}
notRetrievedObjectCount = 0;
vector<uint32_t> notRetrievedObjectIDs;
for (size_t idx = 0; idx < dataSize; idx++) {
if (!processedObjects[idx]) {
notRetrievedObjectCount++;
notRetrievedObjectIDs.push_back(idx);
}
}
sort(sortedResults.begin(), sortedResults.end());
for (auto cit = clusters.begin(); cit != clusters.end(); ++cit) {
(*cit).members.clear();
}
for (auto i = sortedResults.rbegin(); i != sortedResults.rend(); ++i) {
size_t objectID = (*i).vectorID;
size_t clusterID = (*i).centroidID;
if (processedObjects[objectID]) {
processedObjects[objectID] = false;
clusters[clusterID].members.push_back(*i);
clusters[clusterID].members.back().centroidID = clusterID;
radius = (*i).distance;
}
}
vector<Entry> notRetrievedObjects(notRetrievedObjectIDs.size());
#pragma omp parallel for
for (size_t vi = 0; vi < notRetrievedObjectIDs.size(); vi++) {
auto vit = notRetrievedObjectIDs.begin() + vi;
{
double mind = DBL_MAX;
size_t mincidx = -1;
for (auto cit = clusters.begin(); cit != clusters.end(); ++cit) {
double d = distanceL2(vectors[*vit], (*cit).centroid);
if (d < mind) {
mind = d;
mincidx = distance(clusters.begin(), cit);
}
}
notRetrievedObjects[vi] = Entry(*vit, mincidx, mind); // Entry(vectorID, centroidID, distance)
}
}
sort(notRetrievedObjects.begin(), notRetrievedObjects.end());
for (auto nroit = notRetrievedObjects.begin(); nroit != notRetrievedObjects.end(); ++nroit) {
clusters[(*nroit).centroidID].members.push_back(*nroit);
}
moveFartherObjectsToEmptyClusters(clusters);
}
static double
calculateCentroid(std::vector<std::vector<float> > &vectors, std::vector<Cluster> &clusters) {
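// Recompute each centroid as the mean of its members and return the
// total L2 movement of the centroids (0 indicates convergence).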
double distance = 0;
size_t memberCount = 0;
for (auto it = clusters.begin(); it != clusters.end(); ++it) {
memberCount += (*it).members.size();
if ((*it).members.size() != 0) {
std::vector<float> mean(vectors[0].size(), 0.0);
for (auto memit = (*it).members.begin(); memit != (*it).members.end(); ++memit) {
auto mit = mean.begin();
auto &v = vectors[(*memit).vectorID];
for (auto vit = v.begin(); vit != v.end(); ++vit, ++mit) {
*mit += *vit;
}
}
for (auto mit = mean.begin(); mit != mean.end(); ++mit) {
*mit /= (*it).members.size();
}
distance += distanceL2((*it).centroid, mean);
(*it).centroid = mean;
} else {
cerr << "Clustering: Fatal Error. No member!" << endl;
abort();
}
}
return distance;
}
static void
saveClusters(const std::string &file, std::vector<Cluster> &clusters)
{
std::ofstream os(file);
for (auto cit = clusters.begin(); cit != clusters.end(); ++cit) {
std::vector<float> &v = (*cit).centroid;
for (auto it = v.begin(); it != v.end(); ++it) {
os << std::setprecision(9) << (*it);
if (it + 1 != v.end()) {
os << "\t";
}
}
os << std::endl;
}
}
double kmeansWithoutNGT(std::vector<std::vector<float> > &vectors, size_t numberOfClusters,
std::vector<Cluster> &clusters)
{
size_t clusterSize = std::numeric_limits<size_t>::max();
if (clusterSizeConstraint) {
clusterSize = ceil((double)vectors.size() / (double)numberOfClusters);
}
double diff = 0;
for (size_t i = 0; i < maximumIteration; i++) {
std::cerr << "iteration=" << i << std::endl;
assign(vectors, clusters, clusterSize);
// centroid is recomputed.
// diff is distance between the current centroids and the previous centroids.
diff = calculateCentroid(vectors, clusters);
if (diff == 0) {
break;
}
}
return diff == 0; // 1.0 iff converged; kmeans() treats the result as a bool
}
double kmeansWithNGT(NGT::Index &index, std::vector<std::vector<float> > &vectors, size_t numberOfClusters, std::vector<Cluster> &clusters, float epsilon)
{
diffHistory.clear();
NGT::Timer timer;
timer.start();
float radius;
double diff = 0.0;
size_t resultSize;
resultSize = resultSizeCoefficient * vectors.size() / clusters.size();
for (size_t i = 0; i < maximumIteration; i++) {
size_t notRetrievedObjectCount = 0;
radius = -1.0;
assignWithNGT(index, vectors, clusters, radius, resultSize, epsilon, notRetrievedObjectCount);
// centroid is recomputed.
// diff is distance between the current centroids and the previous centroids.
std::vector<Cluster> prevClusters = clusters;
diff = calculateCentroid(vectors, clusters);
timer.stop();
std::cerr << "iteration=" << i << " time=" << timer << " diff=" << diff << std::endl;
timer.start();
diffHistory.push_back(diff);
if (diff == 0) {
break;
}
}
return diff;
}
double kmeansWithNGT(std::vector<std::vector<float> > &vectors, size_t numberOfClusters, std::vector<Cluster> &clusters)
{
pid_t pid = getpid();
std::stringstream str;
str << "cluster-ngt." << pid;
string database = str.str();
string dataFile;
size_t dataSize = 0;
size_t dim = clusters.front().centroid.size();
NGT::Property property;
property.dimension = dim;
property.graphType = NGT::Property::GraphType::GraphTypeANNG;
property.objectType = NGT::Index::Property::ObjectType::Float;
property.distanceType = NGT::Index::Property::DistanceType::DistanceTypeL2;
NGT::Index::createGraphAndTree(database, property, dataFile, dataSize);
float *data = new float[vectors.size() * dim];
float *ptr = data;
dataSize = vectors.size();
for (auto vi = vectors.begin(); vi != vectors.end(); ++vi) {
memcpy(ptr, &((*vi)[0]), dim * sizeof(float));
ptr += dim;
}
size_t threadSize = 20;
NGT::Index::append(database, data, dataSize, threadSize);
delete[] data;
NGT::Index index(database);
return kmeansWithNGT(index, vectors, numberOfClusters, clusters, epsilonFrom);
}
double kmeansWithNGT(NGT::Index &index, size_t numberOfClusters, std::vector<Cluster> &clusters)
{
NGT::GraphIndex &graph = static_cast<NGT::GraphIndex&>(index.getIndex());
NGT::ObjectSpace &os = graph.getObjectSpace();
size_t size = os.getRepository().size();
std::vector<std::vector<float> > vectors(size - 1);
for (size_t idx = 1; idx < size; idx++) {
try {
os.getObject(idx, vectors[idx - 1]);
} catch(...) {
cerr << "Cannot get object " << idx << endl;
}
}
cerr << "# of data for clustering=" << vectors.size() << endl;
double diff = DBL_MAX;
clusters.clear();
setupInitialClusters(vectors, numberOfClusters, clusters);
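// Repeat the clustering with increasing search epsilon (i.e. higher
// search accuracy) until the centroids stop moving.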
for (float epsilon = epsilonFrom; epsilon <= epsilonTo; epsilon += epsilonStep) {
cerr << "epsilon=" << epsilon << endl;
diff = kmeansWithNGT(index, vectors, numberOfClusters, clusters, epsilon);
if (diff == 0.0) {
return diff;
}
}
return diff;
}
double kmeansWithNGT(NGT::Index &index, size_t numberOfClusters, NGT::Index &outIndex)
{
std::vector<Cluster> clusters;
double diff = kmeansWithNGT(index, numberOfClusters, clusters);
for (auto i = clusters.begin(); i != clusters.end(); ++i) {
outIndex.insert((*i).centroid);
}
outIndex.createIndex(16);
return diff;
}
double kmeansWithNGT(NGT::Index &index, size_t numberOfClusters)
{
NGT::Property prop;
index.getProperty(prop);
string path = index.getPath();
index.save();
index.close();
string outIndexName = path;
string inIndexName = path + ".tmp";
std::rename(outIndexName.c_str(), inIndexName.c_str());
NGT::Index::createGraphAndTree(outIndexName, prop);
index.open(outIndexName);
NGT::Index inIndex(inIndexName);
double diff = kmeansWithNGT(inIndex, numberOfClusters, index);
inIndex.close();
NGT::Index::destroy(inIndexName);
return diff;
}
double kmeansWithNGT(string &indexName, size_t numberOfClusters)
{
NGT::Index inIndex(indexName);
double diff = kmeansWithNGT(inIndex, numberOfClusters);
inIndex.save();
inIndex.close();
return diff;
}
static double
calculateMSE(std::vector<std::vector<float> > &vectors, std::vector<Cluster> &clusters)
{
double mse = 0.0;
size_t count = 0;
for (auto cit = clusters.begin(); cit != clusters.end(); ++cit) {
count += (*cit).members.size();
for (auto mit = (*cit).members.begin(); mit != (*cit).members.end(); ++mit) {
mse += meanSumOfSquares((*cit).centroid, vectors[(*mit).vectorID]);
}
}
assert(vectors.size() == count);
return mse / (double)vectors.size();
}
static double
calculateML2(std::vector<std::vector<float> > &vectors, std::vector<Cluster> &clusters)
{
double d = 0.0;
size_t count = 0;
for (auto cit = clusters.begin(); cit != clusters.end(); ++cit) {
count += (*cit).members.size();
double localD= 0.0;
for (auto mit = (*cit).members.begin(); mit != (*cit).members.end(); ++mit) {
double distance = distanceL2((*cit).centroid, vectors[(*mit).vectorID]);
d += distance;
localD += distance;
}
}
if (vectors.size() != count) {
std::cerr << "Warning! vectors.size() != count" << std::endl;
}
return d / (double)vectors.size();
}
static double
calculateML2FromSpecifiedCentroids(std::vector<std::vector<float> > &vectors, std::vector<Cluster> &clusters,
std::vector<size_t> ¢roidIds)
{
double d = 0.0;
size_t count = 0;
for (auto it = centroidIds.begin(); it != centroidIds.end(); ++it) {
Cluster &cluster = clusters[(*it)];
count += cluster.members.size();
for (auto mit = cluster.members.begin(); mit != cluster.members.end(); ++mit) {
d += distanceL2(cluster.centroid, vectors[(*mit).vectorID]);
}
}
return d / (double)vectors.size();
}
void
setupInitialClusters(std::vector<std::vector<float> > &vectors, size_t numberOfClusters, std::vector<Cluster> &clusters)
{
if (clusters.empty()) {
switch (initializationMode) {
case InitializationModeHead:
{
getInitialCentroidsFromHead(vectors, clusters, numberOfClusters);
break;
}
case InitializationModeRandom:
{
getInitialCentroidsRandomly(vectors, clusters, numberOfClusters, 0);
break;
}
case InitializationModeKmeansPlusPlus:
{
getInitialCentroidsKmeansPlusPlus(vectors, clusters, numberOfClusters);
break;
}
default:
std::cerr << "proper initMode is not specified." << std::endl;
exit(1);
}
}
}
bool
kmeans(std::vector<std::vector<float> > &vectors, size_t numberOfClusters, std::vector<Cluster> &clusters)
{
setupInitialClusters(vectors, numberOfClusters, clusters);
switch (clusteringType) {
case ClusteringTypeKmeansWithoutNGT:
return kmeansWithoutNGT(vectors, numberOfClusters, clusters);
break;
case ClusteringTypeKmeansWithNGT:
return kmeansWithNGT(vectors, numberOfClusters, clusters);
break;
default:
cerr << "kmeans::fatal error!. invalid clustering type. " << clusteringType << endl;
abort();
break;
}
}
static void
evaluate(std::vector<std::vector<float> > &vectors, std::vector<Cluster> &clusters, char mode,
std::vector<size_t> centroidIds = std::vector<size_t>())
{
size_t clusterSize = std::numeric_limits<size_t>::max();
assign(vectors, clusters, clusterSize);
std::cout << "The number of vectors=" << vectors.size() << std::endl;
std::cout << "The number of centroids=" << clusters.size() << std::endl;
if (centroidIds.size() == 0) {
switch (mode) {
case 'e':
std::cout << "MSE=" << calculateMSE(vectors, clusters) << std::endl;
break;
case '2':
default:
std::cout << "ML2=" << calculateML2(vectors, clusters) << std::endl;
break;
}
} else {
switch (mode) {
case 'e':
break;
case '2':
default:
std::cout << "ML2=" << calculateML2FromSpecifiedCentroids(vectors, clusters, centroidIds) << std::endl;
break;
}
}
}
ClusteringType clusteringType;
InitializationMode initializationMode;
size_t numberOfClusters;
bool clusterSizeConstraint;
size_t maximumIteration;
float epsilonFrom;
float epsilonTo;
float epsilonStep;
size_t resultSizeCoefficient;
vector<double> diffHistory;
};
}
|
nusd.c | #pragma warning(disable : 4996)
#include <math.h>
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#include <locale.h>
#define _CRT_SECURE_NO_WARNINGS
#define min(x,y) ((x) < (y) ? (x) : (y))
#define max(x,y) ((x) > (y) ? (x) : (y))
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) (((n)<0) ? -((-(n)+(d)-1)/(d)) : (n)/(d))
#define S0(a, i, j, k) c[i][j] = c[i][k] + c[k][j]
//#define match(b1, b2) (((b1)+(b2)) == 3 ? 1 : 0)
#define sigma(i, j) (match(seq[i], seq[j]))
#ifndef MYTHREADS
#define MYTHREADS 4 /* default OpenMP thread count; normally set via -DMYTHREADS=N */
#endif
int max_score(int s1, int s2)
{
if (s1 >= s2)
return s1;
return s2;
}
int max_sc(int s1, int s2, int s3) {
if (s1>=s2 && s1>=s3)
return s1;
if (s2>=s3)
return s2;
return s3;
}
int match(const int e1, const int e2)
{
/*
* 'A' => 0 -> bitwise 0001 -> 1
* 'G' => 1 -> bitwise 0010 -> 2
* 'C' => 2 -> bitwise 0100 -> 4
* 'U' => 3 -> bitwise 1000 -> 8
*
* With these codes, complementary pairs are detected by their sum:
* A+U = 9, G+C = 6, G+U = 10 (wobble pair).
*/
//const bool match =
// (e1 == 0 && e2 == 3) || (e1 == 3 && e2 == 0) ||
// (e1 == 1 && e2 == 2) || (e1 == 2 && e2 == 1) ||
// (e1 == 1 && e2 == 3) || (e1 == 3 && e2 == 1);
//return match;
const int match =
(e1 + e2 == 9) ||
(e1 + e2 == 6) ||
(e1 + e2 == 10) ;
return match;
//(e1 == "A" && e2 == "U") ||
//(e1 == "U" && e2 == "A") ||
//(e1 == "G" && e2 == "C") ||
//(e1 == "C" && e2 == "G") ||
//(e1 == "G" && e2 == "U") ||
//(e1 == "U" && e2 == "G");
}
void printMatrix(int**, int, int);
int ** getFullCopy(int ** table, int N);
int** allocateMatrix(int);
void deallocateMatrix(int**, int);
void write_results_full(int , double , char );
void write_results(int , double );
void computeDYN1Imperfect(int** table, int n, int *seq) {
int** S = getFullCopy(table, n);
double start = omp_get_wtime();
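// Tiled variant of the Nussinov loops produced by polyhedral loop
// transformation; the c0/c1/c4/c5 bounds encode the tile schedule and
// are not meant to be edited by hand.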
for (int c0 = floord(-n + 113, 1680) + 1; c0 <= floord(n - 2, 120) + 1; c0 += 1) {
#pragma omp parallel for num_threads(MYTHREADS)
for (int c1 = max(-c0 - (n + 110) / 112 + 1, -((n + 118) / 120)); c1 <= min(14 * c0 - 1, -c0); c1 += 1) {
for (int c4 = max(max(-n + 2, 112 * c0 + 112 * c1 - 111), 120 * c1 + 1); c4 <= 112 * c0 + 112 * c1; c4 += 1) {
for (int c5 = max(-120 * c1 - 119, -c4 + 1); c5 <= min(n - 1, -120 * c1); c5 += 1) {
for (int c7 = -c4; c7 < c5; c7 += 1) {
S[-c4][c5] = max_score(S[-c4][c7] + S[c7 + 1][c5], S[-c4][c5]);
}
S[-c4][c5] = max_score(S[-c4][c5], S[-c4 + 1][c5 - 1] + match(seq[-c4], seq[c5]));
}
}
}
}
double execution_time = omp_get_wtime() - start;
printf("IMPE: %lf\n", execution_time);
write_results(n, execution_time);
printMatrix(S, n, 1);
deallocateMatrix(S, n);
}
void computeNusOriginal(int** table, int n, int *seq) {
int** S = getFullCopy(table, n);
double start = omp_get_wtime();
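// Nussinov recurrence: S[i][j] = max over i <= k < j of S[i][k] + S[k+1][j],
// and of S[i+1][j-1] + match(seq[i], seq[j]) when bases i and j can pair.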
for (int i = n - 1; i >= 0; i--) {
for (int j = i + 1; j < n; j++) {
for (int k = i; k < j; k++) {
S[i][j] = max(S[i][k] + S[k+1][j], S[i][j]); // s1
}
S[i][j] = max(S[i][j], S[i+1][j-1] + match(seq[i], seq[j])); // s2
}
}
double execution_time = omp_get_wtime() - start;
printf("ORIG: %lf\n", execution_time);
write_results(n, execution_time);
printMatrix(S, n, 0);
deallocateMatrix(S, n);
}
void computeDYN2Perfect(int** table, int n, int *seq) {
int** S = getFullCopy(table, n);
double start = omp_get_wtime();
//Listing 1.2: Perfectly nested Nussinov loops
int t1, t2, t3, t4, t5, t6;
int lb, ub, lbp, ubp, lb2, ub2;
register int lbv, ubv;
if (n >= 3) {
for (t1=1;t1<=n-2;t1++) {
lbp=ceild(t1-25,26);
ubp=floord(-t1+2*n-2,26);
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6)
for (t2=lbp;t2<=ubp;t2++) {
for (t3=max(max(0,ceild(-t1+13*t2-28,30)),ceild(26*t2-n-28,30));t3<=min(floord(-t1+n-1,30),floord(-t1+26*t2+25,60));t3++) {
if (t1 >= 2) {
for (t4=max(30*t3,-t1+13*t2+1);t4<=min(min(-2*t1+n,30*t3+29),-t1+13*t2+13);t4++) {
lbv=max(26*t2,t1+2*t4);
ubv=2*t1+2*t4-2;
#pragma ivdep
#pragma vector always
for (t5=lbv;t5<=ubv;t5++) {
S[t4][(-t4+t5)] = max_sc(S[t4][-t1 + (-t4+t5)] + S[-t1 + (-t4+t5) + 1][(-t4+t5)], S[t4][(-t4+t5)], S[t4 + 1][(-t4+t5) - 1] + sigma(t4, (-t4+t5)));;
S[t4][(-t4+t5)] = max_sc(S[t4][t1 + t4 - 1] + S[t1 + t4 - 1 + 1][(-t4+t5)], S[t4][(-t4+t5)], S[t4 + 1][(-t4+t5) - 1] + sigma(t4, (-t4+t5)));;
}
S[t4][(2*t1+t4-1)] = max_sc(S[t4][t1 + t4 - 1] + S[t1 + t4 - 1 + 1][(2*t1+t4-1)], S[t4][(2*t1+t4-1)], S[t4 + 1][(2*t1+t4-1) - 1] + sigma(t4, (2*t1+t4-1)));;
}
}
for (t4=max(max(30*t3,-2*t1+n+1),26*t2-n+1);t4<=min(min(30*t3+29,-t1+n-1),-t1+13*t2+13);t4++) {
lbv=max(26*t2,t1+2*t4);
ubv=t4+n-1;
#pragma ivdep
#pragma vector always
for (t5=lbv;t5<=ubv;t5++) {
S[t4][(-t4+t5)] = max_sc(S[t4][-t1 + (-t4+t5)] + S[-t1 + (-t4+t5) + 1][(-t4+t5)], S[t4][(-t4+t5)], S[t4 + 1][(-t4+t5) - 1] + sigma(t4, (-t4+t5)));;
S[t4][(-t4+t5)] = max_sc(S[t4][t1 + t4 - 1] + S[t1 + t4 - 1 + 1][(-t4+t5)], S[t4][(-t4+t5)], S[t4 + 1][(-t4+t5) - 1] + sigma(t4, (-t4+t5)));;
}
}
for (t4=max(max(30*t3,-t1+13*t2+14),26*t2-n+1);t4<=min(min(floord(-t1+26*t2+25,2),30*t3+29),-t1+n-1);t4++) {
lbv=max(26*t2,t1+2*t4);
ubv=min(26*t2+25,t4+n-1);
#pragma ivdep
#pragma vector always
for (t5=lbv;t5<=ubv;t5++) {
S[t4][(-t4+t5)] = max_sc(S[t4][-t1 + (-t4+t5)] + S[-t1 + (-t4+t5) + 1][(-t4+t5)], S[t4][(-t4+t5)], S[t4 + 1][(-t4+t5) - 1] + sigma(t4, (-t4+t5)));;
S[t4][(-t4+t5)] = max_sc(S[t4][t1 + t4 - 1] + S[t1 + t4 - 1 + 1][(-t4+t5)], S[t4][(-t4+t5)], S[t4 + 1][(-t4+t5) - 1] + sigma(t4, (-t4+t5)));;
}
}
if (t1 == 1) {
for (t4=max(13*t2,30*t3);t4<=min(min(n-2,13*t2+12),30*t3+29);t4++) {
S[t4][(t4+1)] = max_sc(S[t4][1 + t4 - 1] + S[1 + t4 - 1 + 1][(t4+1)], S[t4][(t4+1)], S[t4 + 1][(t4+1) - 1] + sigma(t4, (t4+1)));;
}
}
}
}
}
}
double execution_time = omp_get_wtime() - start;
printf("PLUT: %lf\n", execution_time);
write_results(n, execution_time);
printMatrix(S, n, 2);
deallocateMatrix(S, n);
}
void computeDYN3ImperfA(int** table, int n, int *seq) {
int** S = getFullCopy(table, n);
double start = omp_get_wtime();
for (int c0 = floord(-31 * n + 115, 3132) + 2; c0 <= floord(79 * n - 158, 2436) + 2; c0 += 1) {
#pragma omp parallel for num_threads(MYTHREADS)
for (int c1 = max(-c0 - (n + 52) / 54 + 2, -((n + 114) / 116)); c1 <= min(min(-c0 + (n - 2) / 42 + 1, c0 + floord(-4 * c0 + 3, 31) - 1), floord(-21 * c0 + 20, 79)); c1 += 1) {
for (int c2 = max(-c0 + c1 + floord(21 * c0 - 17 * c1 - 21, 48) + 1, -c0 - c1 - (n - 42 * c0 - 42 * c1 + 136) / 96 + 1); c2 <= min(min(-1, -c0 - c1), -((27 * c0 - 31 * c1 + 54) / 69) + 1); c2 += 1) {
for (int c5 = max(27 * c0 - 31 * c1 + 27 * c2 - 83, -42 * c2 - 41); c5 <= min(min(n + 54 * c0 + 54 * c1 + 54 * c2 - 1, -42 * c2), 54 * c0 - 62 * c1 + 54 * c2); c5 += 1) {
for (int c6 = max(-54 * c0 - 54 * c1 - 54 * c2, -116 * c1 - 2 * c5 - 114); c6 <= min(min(-54 * c0 - 54 * c1 - 54 * c2 + 53, n - c5 - 1), -116 * c1 - c5); c6 += 1) {
for (int c7 = max(-116 * c1 - 115, c5 + c6); c7 <= min(min(n - 1, -116 * c1), 2 * c5 + c6 - 1); c7 += 1) {
if (2 * c5 + c6 >= c7 + 2) {
S[c6][c7] = max_score(S[c6][-c5 + c7] + S[-c5 + c7 + 1][c7], S[c6][c7]);
if (c7 == c5 + c6) {
S[c6][c5 + c6] = max_score(S[c6][c5 + c6], S[c6 + 1][c5 + c6 - 1] + match(seq[c6], seq[c5 + c6]));
}
}
S[c6][c7] = max_score(S[c6][c5 + c6 - 1] + S[c5 + c6][c7], S[c6][c7]);
if (c7 == c5 + c6) {
S[c6][c5 + c6] = max_score(S[c6][c5 + c6], S[c6 + 1][c5 + c6 - 1] + match(seq[c6], seq[c5 + c6]));
}
}
}
}
}
}
}
double execution_time = omp_get_wtime() - start;
printf("IMPA: %lf\n", execution_time);
write_results(n, execution_time);
printMatrix(S, n, 3);
deallocateMatrix(S, n);
}
void computeDYN4ImperfB(int** table, int n, int *seq) {
int** S = getFullCopy(table, n);
double start = omp_get_wtime();
for (int c0 = max(floord(-n - 13, 28) + 3, floord(-37 * n + 115, 2436) + 2); c0 <= floord(3 * n - 6, 116) + 2; c0 += 1) {
#pragma omp parallel for num_threads(MYTHREADS)
for (int c1 = max(-c0 - (n + 40) / 42 + 2, -((n + 114) / 116)); c1 <= min(min(-c0 + (n - 2) / 58 + 1, floord(-c0, 3)), c0 + floord(-16 * c0 + 15, 37) - 1); c1 += 1) {
for (int c2 = max(c1 + floord(-21 * c0 - 13 * c1 + 21, 50), -((n + 42 * c0 + 42 * c1 + 56) / 100)); c2 <= min(min(-1, -c0 - c1), -((21 * c0 - 37 * c1 + 80) / 79) + 1); c2 += 1) {
for (int c5 = max(21 * c0 - 37 * c1 + 21 * c2 - 77, -58 * c2 - 57); c5 <= min(min(n + 42 * c0 + 42 * c1 + 42 * c2 - 1, -58 * c2), 42 * c0 - 74 * c1 + 42 * c2); c5 += 1) {
for (int c6 = max(-42 * c0 - 42 * c1 - 42 * c2, -116 * c1 - 2 * c5 - 114); c6 <= min(min(-42 * c0 - 42 * c1 - 42 * c2 + 41, n - c5 - 1), -116 * c1 - c5); c6 += 1) {
for (int c7 = max(-116 * c1 - 115, c5 + c6); c7 <= min(min(n - 1, -116 * c1), 2 * c5 + c6 - 1); c7 += 1) {
if (2 * c5 + c6 >= c7 + 2) {
S[c6][c7] = max_score(S[c6][-c5 + c7] + S[-c5 + c7 + 1][c7], S[c6][c7]);
}
S[c6][c7] = max_score(S[c6][c5 + c6 - 1] + S[c5 + c6][c7], S[c6][c7]);
if (c7 == c5 + c6) {
S[c6][c5 + c6] = max_score(S[c6][c5 + c6], S[c6 + 1][c5 + c6 - 1] + match(seq[c6], seq[c5 + c6]));
}
}
}
}
}
}
}
double execution_time = omp_get_wtime() - start;
printf("IMPB: %lf\n", execution_time);
write_results_full(n, execution_time, '\n');
printMatrix(S, n, 4);
deallocateMatrix(S, n);
}
void computeNusMP(int** table, int n, int *seq) {
int** S = getFullCopy(table, n);
//int c3, c1, c4, c5, c9, c10, c11;
double start = omp_get_wtime();
for( int c1 = 1; c1 < n + floord(n - 2, 128); c1 += 1)
#pragma omp parallel for num_threads(MYTHREADS)
for(int c3 = max(0, -n + c1 + 1); c3 <= (c1 - 1) / 129; c3 += 1)
for(int c4 = 0; c4 <= 1; c4 += 1) {
if (c4 == 1) {
for(int c9 = n - c1 + 129 * c3; c9 <= min(n - 1, n - c1 + 129 * c3 + 127); c9 += 1)
for(int c10 = max(0, n - c1 + 129 * c3 - c9 + 1); c10 <= 1; c10 += 1) {
if (c10 == 1) {
S[(n-c1+c3-1)][c9] = max(S[(n-c1+c3-1)][c9], S[(n-c1+c3-1)+1][c9-1] + sigma((n-c1+c3-1), c9));
} else {
for(int c11 = n - c1 + 129 * c3; c11 < c9; c11 += 1)
S[(n-c1+c3-1)][c9] = max(S[(n-c1+c3-1)][c11] + S[c11+1][c9], S[(n-c1+c3-1)][c9]);
}
}
} else {
for(int c5 = 0; c5 <= 8 * c3; c5 += 1)
for(int c9 = n - c1 + 129 * c3; c9 <= min(n - 1, n - c1 + 129 * c3 + 127); c9 += 1)
for(int c11 = n - c1 + c3 + 16 * c5 - 1; c11 <= min(n - c1 + 129 * c3 - 1, n - c1 + c3 + 16 * c5 + 14); c11 += 1)
S[(n-c1+c3-1)][c9] = max(S[(n-c1+c3-1)][c11] + S[c11+1][c9], S[(n-c1+c3-1)][c9]);
}
}
double execution_time = omp_get_wtime() - start;
printf("COMP: %lf\n", execution_time);
write_results(n, execution_time);
printMatrix(S, n, 5);
deallocateMatrix(S, n);
}
void printMatrix(int** matrix, int N, int fileno) {
char filename[16]; // "nontiled" + digit + NUL, with headroom
sprintf(filename, "nontiled%d", fileno);
FILE* f = fopen(filename, "wt");
for (int i = 0; i < N; i++) {
for (int j = 0; j < N; j++)
fprintf(f, "%d ", matrix[i][j]);
fprintf(f, "\n");
}
fclose(f);
}
int **getFullCopy(int ** table, int N)
{
int **S = allocateMatrix(N);
for (int i = 0; i < N; i++)
for (int j = 0; j < N; j++)
S[i][j] = table[i][j];
return S;
}
int** allocateMatrix(int N) {
int** t = (int**)malloc(sizeof(int*) * N);
for (int i = 0; i < N; i++) {
t[i] = (int*)malloc(sizeof(int) * N);
}
return t;
}
int* allocateVector(int N) {
int* t = (int*)malloc(sizeof(int) * N);
return t;
}
void deallocateMatrix(int **t, int N) {
for (int i = 0; i < N; i++) {
free(t[i]);
}
free(t);
}
void write_results_full(int n, double execution_time, char end_char)
{
FILE* f = fopen("results.txt", "at");
fprintf(f, "%d;%lf%c", n, execution_time, end_char);
fclose(f);
}
void write_results(int n, double execution_time)
{
write_results_full(n, execution_time, ';');
}
int getValue(const char c)
{
/*
* 'A' => 0 -> bitwise 0001 -> 1
* 'G' => 1 -> bitwise 0010 -> 2
* 'C' => 2 -> bitwise 0100 -> 4
* 'U' => 3 -> bitwise 1000 -> 8
*/
if(c=='A') return 1;
if(c=='G') return 2;
if(c=='C') return 4;
if(c=='U') return 8;
return 16;
}
#define PERFORMANCE_TEST 1
int main(void) {
#if PERFORMANCE_TEST==1
const int ZMAX = 8000;
#else
const int ZMAX = 16;
#endif
setlocale(LC_NUMERIC, "Polish");
int** graph = allocateMatrix(ZMAX);
int* seq = allocateVector(ZMAX);
for (int i = 0; i < ZMAX; i++)
for (int j = 0; j < ZMAX; j++)
graph[i][j] = 0;
for (int i = 0; i < ZMAX; i++)
graph[i][i] = 0;
//
const char* seqTest = "GCGUCCACGGCUAGCU";
#if PERFORMANCE_TEST==1
for (int i=0 ; i<ZMAX ; i++)
{
seq[i] = 1 << (rand() % 4); // random base code in {1,2,4,8}, matching getValue()
}
#else
for (int i = 0; i < ZMAX; i++)
seq[i] = getValue(seqTest[i]);
#endif
int N = 100;// ZMAX - 10;
while (N < ZMAX)
{
//N += 10;
computeNusOriginal(graph, N, seq);
computeNusMP(graph, N, seq);
computeDYN1Imperfect(graph, N, seq);
computeDYN2Perfect(graph, N, seq);
computeDYN3ImperfA(graph, N, seq);
computeDYN4ImperfB(graph, N, seq);
N += 100;
}
deallocateMatrix(graph, ZMAX);
free(seq);
return 0;
}
|
GB_assign_zombie3.c | //------------------------------------------------------------------------------
// GB_assign_zombie3: delete entries in C(:,j) for C_replace_phase
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// For GrB_Row_assign or GrB_Col_assign, C(I,j)<#M,repl>=any must delete all
// entries C(i,j) outside of C(I,j), if the mask M(i,0) (or its complement) is
// zero. This step is not done for GxB_*_subassign, since that method does not
// modify anything outside IxJ.
// GB_assign_zombie3 and GB_assign_zombie4 are transposes of each other.
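// A "zombie" is an entry flagged for deletion: its row index is negated
// via GB_FLIP so it can be pruned later, without reallocating the matrix
// inside this kernel.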
#include "GB_assign.h"
void GB_assign_zombie3
(
GrB_Matrix Z, // the matrix C, or a copy
const GrB_Matrix M,
const bool Mask_comp,
const bool Mask_struct,
const int64_t j, // vector index with entries to delete
const GrB_Index *I,
const int64_t nI,
const int Ikind,
const int64_t Icolon [3],
GB_Context Context
)
{
//--------------------------------------------------------------------------
// get Z (:,j)
//--------------------------------------------------------------------------
const int64_t *GB_RESTRICT Zh = Z->h ;
const int64_t *GB_RESTRICT Zp = Z->p ;
int64_t *GB_RESTRICT Zi = Z->i ;
int64_t pZ_start, pZ_end, pleft = 0, pright = Z->nvec-1 ;
GB_lookup (Z->is_hyper, Zh, Zp, &pleft, pright, j, &pZ_start, &pZ_end) ;
int64_t nzombies = Z->nzombies ;
const int64_t zjnz = pZ_end - pZ_start ;
//--------------------------------------------------------------------------
// get M(:,0)
//--------------------------------------------------------------------------
const int64_t *GB_RESTRICT Mp = M->p ;
const int64_t *GB_RESTRICT Mi = M->i ;
const GB_void *GB_RESTRICT Mx = (GB_void *) (Mask_struct ? NULL : (M->x)) ;
const size_t msize = M->type->size ;
int64_t pM_start = Mp [0] ;
int64_t pM_end = Mp [1] ;
//--------------------------------------------------------------------------
// determine the number of threads to use
//--------------------------------------------------------------------------
GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;
int nthreads = GB_nthreads (zjnz, chunk, nthreads_max) ;
int ntasks = (nthreads == 1) ? 1 : (64 * nthreads) ;
//--------------------------------------------------------------------------
// delete entries from Z(:,j) that are outside I, if the mask M allows it
//--------------------------------------------------------------------------
int taskid ;
#pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \
reduction(+:nzombies)
for (taskid = 0 ; taskid < ntasks ; taskid++)
{
int64_t p1, p2 ;
GB_PARTITION (p1, p2, zjnz, taskid, ntasks) ;
for (int64_t pZ = pZ_start + p1 ; pZ < pZ_start + p2 ; pZ++)
{
//------------------------------------------------------------------
// get Z(i,j)
//------------------------------------------------------------------
int64_t i = Zi [pZ] ;
if (!GB_IS_ZOMBIE (i))
{
//--------------------------------------------------------------
// Z(i,j) is outside Z(I,j) if i is not in the list I
//--------------------------------------------------------------
bool i_outside = !GB_ij_is_in_list (I, nI, i, Ikind, Icolon) ;
if (i_outside)
{
//----------------------------------------------------------
// Z(i,j) is a live entry not in the Z(I,J) submatrix
//----------------------------------------------------------
// Check the mask M to see if it should be deleted.
int64_t pM = pM_start ;
int64_t pright = pM_end - 1 ;
bool found ;
GB_BINARY_SEARCH (i, Mi, pM, pright, found) ;
bool mij = false ;
if (found)
{
// found it
mij = GB_mcast (Mx, pM, msize) ;
}
if (Mask_comp)
{
// negate the mask if Mask_comp is true
mij = !mij ;
}
if (!mij)
{
// delete Z(i,j) by marking it as a zombie
nzombies++ ;
Zi [pZ] = GB_FLIP (i) ;
}
}
}
}
}
//--------------------------------------------------------------------------
// return result
//--------------------------------------------------------------------------
Z->nzombies = nzombies ;
}
|
progress_counter.h | #ifndef TEX_PROGRESSCOUNTER_HEADER
#define TEX_PROGRESSCOUNTER_HEADER
#include <atomic>
#include <fstream>
#include <iostream>
#include <sstream>
#include "util/timer.h"
#include <cmath>
enum ProgressCounterStyle{
ETA,
SIMPLE
};
static const std::string clear = "\r" + std::string(80,' ') + "\r";
/*
* Displays processing progress.
* */
class ProgressCounter{
private:
std::ofstream tty;
util::WallTimer timer; // elapsed-time timer
std::string task; // task name shown in the progress line
std::size_t max;
std::atomic_size_t count;
public:
ProgressCounter(std::string const & _task, std::size_t max);
template<ProgressCounterStyle T> void progress(void);
void inc(void);
};
inline ProgressCounter::ProgressCounter(std::string const & _task, std::size_t _max): tty("/dev/tty", std::ios_base::out), timer(),task(_task), max(_max), count(0) {}
template<ProgressCounterStyle T>
void ProgressCounter::progress(void){
if ((max > 100 && count % (max / 100) == 0) || max <= 100) {
float percent = static_cast<float>(count) / max;
int ipercent = std::floor(percent * 100.0f + 0.5f);
std::stringstream ss;
ss << clear << task << " " << ipercent << "%...";
if (T == ETA && ipercent > 3){
std::size_t const elapsed = timer.get_elapsed();
std::size_t eta = (elapsed / percent - elapsed) / 1000;
ss << " eta ~ " << eta << " s";
}
#pragma omp critical(progress_counter_progress)
tty << ss.rdbuf() << std::flush;
}
}
inline void ProgressCounter::inc(void){
std::size_t tmp;
tmp = ++count;
if(tmp == max) {
std::stringstream ss;
ss << clear << task << " 100%... done. (took "
<< timer.get_elapsed_sec() << " s)";
#pragma omp critical(progress_counter_inc)
std::cout << ss.rdbuf() << std::endl;
}
}
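/* Minimal usage sketch (hypothetical work loop; process() and items
 * are placeholders, not part of this header):
 *
 *   ProgressCounter counter("Processing items", items.size());
 *   #pragma omp parallel for
 *   for (std::size_t i = 0; i < items.size(); ++i) {
 *       process(items[i]);
 *       counter.progress<ETA>();
 *       counter.inc();
 *   }
 */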
#endif /* TEX_PROGRESSCOUNTER_HEADER */ |
pooling_2x2.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#if __ARM_NEON
#include <arm_neon.h>
#endif // __ARM_NEON
static void pooling2x2s2_max_neon(const Mat& bottom_blob, Mat& top_blob)
{
int w = bottom_blob.w;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
const int tailstep = w - 2*outw + w;
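    // After one output row the input pointers have advanced 2*outw floats;
    // tailstep skips the remainder of that row plus one full row, giving
    // the vertical stride of 2.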
#pragma omp parallel for
for (int q=0; q<inch; q++)
{
const float* img0 = bottom_blob.channel(q);
float* outptr = top_blob.channel(q);
const float* r0 = img0;
const float* r1 = img0 + w;
for (int i = 0; i < outh; i++)
{
#if __ARM_NEON
int nn = outw >> 2;
int remain = outw - (nn << 2);
#else
int remain = outw;
#endif // __ARM_NEON
#if __ARM_NEON
#if __aarch64__
if (nn > 0)
{
asm volatile(
"0: \n"
"prfm pldl1keep, [%1, #256] \n"
"prfm pldl1keep, [%2, #256] \n"
"ld1 {v0.4s, v1.4s}, [%1], #32 \n"
"ld1 {v2.4s, v3.4s}, [%2], #32 \n"
"fmax v0.4s, v0.4s, v2.4s \n"
"fmax v1.4s, v1.4s, v3.4s \n"
"fmaxp v2.4s, v0.4s, v1.4s \n"
"subs %w0, %w0, #1 \n"
"st1 {v2.4s}, [%3], #16 \n"
"bne 0b \n"
: "=r"(nn), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(outptr) // %3
: "0"(nn),
"1"(r0),
"2"(r1),
"3"(outptr)
: "cc", "memory", "v0", "v1", "v2", "v3"
);
}
#else
if (nn > 0)
{
asm volatile(
"0: \n"
"pld [%1, #256] \n"
"pld [%2, #256] \n"
"vld1.f32 {d0-d3}, [%1]! \n"
"vld1.f32 {d4-d7}, [%2]! \n"
"vmax.f32 q0, q0, q2 \n"
"vmax.f32 q1, q1, q3 \n"
"vpmax.f32 d4, d0, d1 \n"
"vpmax.f32 d5, d2, d3 \n"
"subs %0, #1 \n"
"vst1.f32 {d4-d5}, [%3]! \n"
"bne 0b \n"
: "=r"(nn), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(outptr) // %3
: "0"(nn),
"1"(r0),
"2"(r1),
"3"(outptr)
: "cc", "memory", "q0", "q1", "q2", "q3"
);
}
#endif // __aarch64__
#endif // __ARM_NEON
for (; remain>0; remain--)
{
float max0 = std::max(r0[0], r0[1]);
float max1 = std::max(r1[0], r1[1]);
*outptr = std::max(max0, max1);
r0 += 2;
r1 += 2;
outptr++;
}
r0 += tailstep;
r1 += tailstep;
}
}
}
|
gmm.c | /** @file gmm.c
** @brief Gaussian Mixture Models - Implementation
** @author David Novotny
** @author Andrea Vedaldi
**/
/*
Copyright (C) 2013 David Novotny and Andrea Vedaldi.
All rights reserved.
This file is part of the VLFeat library and is made available under
the terms of the BSD license (see the COPYING file).
*/
/**
<!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
@page gmm Gaussian Mixture Models (GMM)
@author David Novotny
@author Andrea Vedaldi
@tableofcontents
<!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
@ref gmm.h is an implementation of *Gaussian Mixture Models* (GMMs).
The main functionality provided by this module is learning GMMs from
data by maximum likelihood. Model optimization uses the Expectation
Maximization (EM) algorithm @cite{dempster77maximum}. The
implementation supports @c float or @c double data types, is
parallelized, and is tuned to work reliably and effectively on
datasets of visual features. Stability is obtained in part by
regularizing and restricting the parameters of the GMM.
@ref gmm-starting demonstrates how to use the C API to compute the FV
representation of an image. For further details refer to:
- @subpage gmm-fundamentals
<!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
@section gmm-starting Getting started
<!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
In order to use @ref gmm.h to learn a GMM from training data, create a
new ::VlGMM object instance, set the parameters as desired, and run
the training code. The following example learns @c numClusters
Gaussian components from @c numData vectors of dimension @c dimension
and storage class @c float using at most 100 EM iterations:
@code
VlGMM * gmm ;
float * means ;
float * covariances ;
float * priors ;
float * posteriors ;
double loglikelihood ;
// create a new instance of a GMM object for float data
gmm = vl_gmm_new (VL_TYPE_FLOAT, dimension, numClusters) ;
// set the maximum number of EM iterations to 100
vl_gmm_set_max_num_iterations (gmm, 100) ;
// set the initialization to random selection
vl_gmm_set_initialization (gmm,VlGMMRand);
// cluster the data, i.e. learn the GMM
vl_gmm_cluster (gmm, data, numData);
// get the means, covariances, and priors of the GMM
means = vl_gmm_get_means(gmm);
covariances = vl_gmm_get_covariances(gmm);
priors = vl_gmm_get_priors(gmm);
// get loglikelihood of the estimated GMM
loglikelihood = vl_gmm_get_loglikelihood(gmm) ;
// get the soft assignments of the data points to each cluster
posteriors = vl_gmm_get_posteriors(gmm) ;
@endcode
@note ::VlGMM assumes that the covariance matrices of the GMM are
diagonal. This reduces significantly the number of parameters to learn
and is usually an acceptable compromise in vision applications. If the
data is significantly correlated, it can be beneficial to de-correlate
it by PCA rotation or projection in pre-processing.
::vl_gmm_get_loglikelihood is used to get the final loglikelihood of
the estimated mixture, ::vl_gmm_get_means and ::vl_gmm_get_covariances
to obtain the means and the diagonals of the covariance matrices of
the estimated Gaussian modes, and ::vl_gmm_get_posteriors to get the
posterior probabilities that a given point is associated to each of
the modes (soft assignments).
The learning algorithm, which uses EM, finds a local optimum of the
objective function. Therefore the initialization is crucial in
obtaining a good model, as measured in terms of the final
loglikelihood. ::VlGMM supports a few methods (use
::vl_gmm_set_initialization to choose one) as follows:
Method | ::VlGMMInitialization enumeration | Description
----------------------|-----------------------------------------|-----------------------------------------------
Random initialization | ::VlGMMRand | Random initialization of the mixture parameters
KMeans | ::VlGMMKMeans | Initialization of the mixture parameters using ::VlKMeans
Custom | ::VlGMMCustom | User specified initialization
Note that in the case of ::VlGMMKMeans initialization, an object of
type ::VlKMeans object must be created and passed to the ::VlGMM
instance (see @ref kmeans to see how to correctly set up this object).
When a user wants to use the ::VlGMMCustom method, the initial means,
covariances and priors have to be specified using the
::vl_gmm_set_means, ::vl_gmm_set_covariances and ::vl_gmm_set_priors
methods.
**/
/**
<!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
@page gmm-fundamentals GMM fundamentals
@tableofcontents
<!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
A *Gaussian Mixture Model* (GMM) is a mixture of $K$ multivariate
Gaussian distributions. In order to sample from a GMM, one samples
first the component index $k \in \{1,\dots,K\}$ with *prior
probability* $\pi_k$, and then samples the vector $\bx \in
\mathbb{R}^d$ from the $k$-th Gaussian distribution
$p(\bx|\mu_k,\Sigma_k)$. Here $\mu_k$ and $\Sigma_k$ are respectively
the *mean* and *covariance* of the distribution. The GMM is completely
specified by the parameters $\Theta=\{\pi_k,\mu_k,\Sigma_k; k =
1,\dots,K\}$
The density $p(\bx|\Theta)$ induced on the training data is obtained
by marginalizing the component selector $k$, obtaining
\[
p(\bx|\Theta)
= \sum_{k=1}^{K} \pi_k p( \bx_i |\mu_k,\Sigma_k),
\qquad
p( \bx |\mu_k,\Sigma_k)
=
\frac{1}{\sqrt{(2\pi)^d\det\Sigma_k}}
\exp\left[
-\frac{1}{2} (\bx-\mu_k)^\top\Sigma_k^{-1}(\bx-\mu_k)
\right].
\]
Learning a GMM to fit a dataset $X=(\bx_1, \dots, \bx_n)$ is usually
done by maximizing the log-likelihood of the data:
@f[
\ell(\Theta;X)
= E_{\bx\sim\hat p} [ \log p(\bx|\Theta) ]
= \frac{1}{n}\sum_{i=1}^{n} \log \sum_{k=1}^{K} \pi_k p(\bx_i|\mu_k, \Sigma_k)
@f]
where $\hat p$ is the empirical distribution of the data. An algorithm
to solve this problem is introduced next.
<!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
@section gmm-em Learning a GMM by expectation maximization
<!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
The direct maximization of the log-likelihood function of a GMM is
difficult due to the fact that the assignments of points to Gaussian
mode is not observable and, as such, must be treated as a latent
variable.
Usually, GMMs are learned by using the *Expectation Maximization* (EM)
algorithm @cite{dempster77maximum}. Consider in general the problem of
estimating to the maximum likelihood a distribution $p(x|\Theta) =
\int p(x,h|\Theta)\,dh$, where $x$ is a measurement, $h$ is a *latent
variable*, and $\Theta$ are the model parameters. By introducing an
auxiliary distribution $q(h|x)$ on the latent variable, one can use
Jensen inequality to obtain the following lower bound on the
log-likelihood:
@f{align*}
\ell(\Theta;X) =
E_{x\sim\hat p} \log p(x|\Theta)
&= E_{x\sim\hat p} \log \int p(x,h|\Theta) \,dh \\
&= E_{x\sim\hat p} \log \int \frac{p(x,h|\Theta)}{q(h|x)} q(h|x)\,dh \\
&\geq E_{x\sim\hat p} \int q(h|x) \log \frac{p(x,h|\Theta)}{q(h|x)}\,dh \\
&= E_{(x,h) \sim q(h|x) \hat p(x)} \log p(x,h|\Theta) -
E_{(x,h) \sim q(h|x) \hat p(x)} \log q(h|x)
@f}
The first term of the last expression is the log-likelihood of the
model where both $x$ and $h$ are observed and jointly distributed
as $q(h|x)\hat p(x)$; the second term is the average entropy of the
latent variable, which does not depend on $\Theta$. This lower bound
is maximized and becomes tight by setting $q(h|x) = p(h|x,\Theta)$ to
be the posterior distribution on the latent variable $h$ (given the
current estimate of the parameters $\Theta$). In fact:
\[
E_{x \sim \hat p} \log p(x|\Theta)
=
E_{(x,h) \sim p(h|x,\Theta) \hat p(x)}\left[ \log \frac{p(x,h|\Theta)}{p(h|x,\Theta)} \right]
=
E_{(x,h) \sim p(h|x,\Theta) \hat p(x)} [ \log p(x|\Theta) ]
=
\ell(\Theta;X).
\]
EM alternates between updating the latent variable auxiliary
distribution $q(h|x) = p(h|x,\Theta_t)$ (*expectation step*) given the
current estimate of the parameters $\Theta_t$, and then updating the
model parameters $\Theta_{t+1}$ by maximizing the log-likelihood lower
bound derived (*maximization step*). The simplification is that in the
maximization step both $x$ and $h$ are now ``observed'' quantities.
This procedure converges to a local optimum of the model
log-likelihood.
@subsection gmm-expectation-step Expectation step
In the case of a GMM, the latent variables are the point-to-cluster
assignments $k_i, i=1,\dots,n$, one for each of $n$ data points. The
auxiliary distribution $q(k_i|\bx_i) = q_{ik}$ is a matrix with $n
\times K$ entries. Each row $q_{i,:}$ can be thought of as a vector of
soft assignments of the data points $\bx_i$ to each of the Gaussian
modes. Setting $q_{ik} = p(k_i | \bx_i, \Theta)$ yields
\[
q_{ik} =
\frac
{\pi_k p(\bx_i|\mu_k,\Sigma_k)}
{\sum_{l=1}^K \pi_l p(\bx_i|\mu_l,\Sigma_l)}
\]
where the Gaussian density $p(\bx_i|\mu_k,\Sigma_k)$ was given above.
One important point to keep in mind when these probabilities are
computed is the fact that the Gaussian densities may attain very low
values and underflow in a vanilla implementation. Furthermore, the
VLFeat GMM implementation restricts the covariance matrices to be
diagonal. In this case, the log-determinant of $\Sigma_k$ reduces to
the sum of the logarithms of its diagonal elements, and the inverse of
$\Sigma_k$ is obtained by inverting the elements on the diagonal of
the covariance matrix.
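A minimal sketch of this log-domain computation (mirroring the
implementation below, not a public API; @c logp, @c q and @c K are
hypothetical) subtracts the per-point maximum before exponentiating:
@code
/* E-step for one data point; logp[k] is assumed to hold
   log pi_k - (d/2) log(2 pi) - (1/2) log det Sigma_k - (1/2) maha(x,k) */
float maxp = (float)(-VL_INFINITY_D) ;
float sum = 0 ;
int k ;
for (k = 0 ; k < K ; ++k) { if (logp[k] > maxp) maxp = logp[k] ; }
for (k = 0 ; k < K ; ++k) { q[k] = expf(logp[k] - maxp) ; sum += q[k] ; }
for (k = 0 ; k < K ; ++k) { q[k] /= sum ; } /* posteriors q_{ik} */
@endcode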
@subsection gmm-maximization-step Maximization step
The M step estimates the parameters of the Gaussian mixture components
and the prior probabilities $\pi_k$ given the auxiliary distribution
on the point-to-cluster assignments computed in the E step. Since all
the variables are now ``observed'', the estimate is quite simple. For
example, the mean $\mu_k$ of a Gaussian mode is obtained as the mean
of the data points assigned to it (accounting for the strength of the
soft assignments). The other quantities are obtained in a similar
manner, yielding:
@f{align*}
\mu_k &= { { \sum_{i=1}^n q_{ik} \bx_{i} } \over { \sum_{i=1}^n q_{ik} } },
\\
\Sigma_k &= { { \sum_{i=1}^n { q_{ik} (\bx_{i} - \mu_{k}) {(\bx_{i} - \mu_{k})}^T } } \over { \sum_{i=1}^n q_{ik} } },
\\
\pi_k &= { \sum_{i=1}^n { q_{ik} } \over { \sum_{i=1}^n \sum_{l=1}^K q_{il} } }.
@f}
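In code, the M step is a set of weighted accumulations over the data;
the following sketch (means only, with posteriors stored cluster-major
per point as in the library, and @c mass, @c x, @c q as hypothetical
buffers) illustrates it:
@code
memset (means, 0, sizeof(float) * K * d) ;
memset (mass, 0, sizeof(float) * K) ;
for (i = 0 ; i < n ; ++i) {
  for (k = 0 ; k < K ; ++k) {
    float qik = q[k + i * K] ;          /* posterior q_{ik} */
    mass[k] += qik ;
    for (j = 0 ; j < d ; ++j) { means[k*d + j] += qik * x[i*d + j] ; }
  }
}
for (k = 0 ; k < K ; ++k) {
  for (j = 0 ; j < d ; ++j) { means[k*d + j] /= mass[k] ; }
}
@endcode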
<!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
@section gmm-fundamentals-init Initialization algorithms
<!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
The EM algorithm is a local optimization method. As such, the quality
of the solution strongly depends on the quality of the initial values
of the parameters (i.e. of the locations and shapes of the Gaussian
modes).
@ref gmm.h supports the following cluster initialization algorithms:
- <b>Random data points.</b> (::vl_gmm_init_with_rand_data) This method
sets the means of the modes by sampling at random a corresponding
number of data points, sets the covariance matrices of all the modes
to the covariance of the entire dataset, and sets the prior
probabilities of the Gaussian modes to be uniform. This
initialization method is the fastest, simplest, as well as the one
most likely to end in a bad local minimum.
- <b>KMeans initialization</b> (::vl_gmm_init_with_kmeans) This
method uses KMeans to pre-cluster the points. It then sets the means
and covariances of the Gaussian distributions to the sample means and
covariances of each KMeans cluster. It also sets the prior
probabilities to be proportional to the mass of each cluster. In
order to use this initialization method, a user can specify an
instance of ::VlKMeans by using the function
::vl_gmm_set_kmeans_init_object, or let ::VlGMM create one
automatically.
Alternatively, one can manually specify a starting point
(::vl_gmm_set_priors, ::vl_gmm_set_means, ::vl_gmm_set_covariances).
**/
#include "gmm.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#ifndef VL_DISABLE_SSE2
#include "mathop_sse2.h"
#endif
#ifndef VL_DISABLE_AVX
#include "mathop_avx.h"
#endif
/* ---------------------------------------------------------------- */
#ifndef VL_GMM_INSTANTIATING
/* ---------------------------------------------------------------- */
#define VL_GMM_MIN_VARIANCE 1e-6
#define VL_GMM_MIN_POSTERIOR 1e-2
#define VL_GMM_MIN_PRIOR 1e-6
struct _VlGMM
{
vl_type dataType ; /**< Data type. */
vl_size dimension ; /**< Data dimensionality. */
vl_size numClusters ; /**< Number of clusters */
vl_size numData ; /**< Number of last time clustered data points. */
vl_size maxNumIterations ; /**< Maximum number of refinement iterations. */
vl_size numRepetitions ; /**< Number of clustering repetitions. */
int verbosity ; /**< Verbosity level. */
void * means; /**< Means of Gaussian modes. */
void * covariances; /**< Diagonals of covariance matrices of Gaussian modes. */
void * priors; /**< Weights of Gaussian modes. */
void * posteriors; /**< Probabilities of correspondences of points to clusters. */
double * sigmaLowBound ; /**< Lower bound on the diagonal covariance values. */
VlGMMInitialization initialization; /**< Initialization option */
VlKMeans * kmeansInit; /**< Kmeans object for initialization of gaussians */
double LL ; /**< Current solution loglikelihood */
vl_bool kmeansInitIsOwner; /**< Indicates whether a user provided the kmeans initialization object */
} ;
/* ---------------------------------------------------------------- */
/* Life-cycle */
/* ---------------------------------------------------------------- */
static void
_vl_gmm_prepare_for_data (VlGMM* self, vl_size numData)
{
if (self->numData < numData) {
vl_free(self->posteriors) ;
self->posteriors = vl_malloc(vl_get_type_size(self->dataType) * numData * self->numClusters) ;
}
self->numData = numData ;
}
/** @brief Create a new GMM object
** @param dataType type of data (::VL_TYPE_FLOAT or ::VL_TYPE_DOUBLE)
** @param dimension dimension of the data.
** @param numComponents number of Gaussian mixture components.
** @return new GMM object instance.
**/
VlGMM *
vl_gmm_new (vl_type dataType, vl_size dimension, vl_size numComponents)
{
vl_index i ;
vl_size size = vl_get_type_size(dataType) ;
VlGMM * self = vl_calloc(1, sizeof(VlGMM)) ;
self->dataType = dataType;
self->numClusters = numComponents ;
self->numData = 0;
self->dimension = dimension ;
self->initialization = VlGMMRand;
self->verbosity = 0 ;
self->maxNumIterations = 50;
self->numRepetitions = 1;
self->sigmaLowBound = NULL ;
self->priors = NULL ;
self->covariances = NULL ;
self->means = NULL ;
self->posteriors = NULL ;
self->kmeansInit = NULL ;
self->kmeansInitIsOwner = VL_FALSE;
self->priors = vl_calloc (numComponents, size) ;
self->means = vl_calloc (numComponents * dimension, size) ;
self->covariances = vl_calloc (numComponents * dimension, size) ;
self->sigmaLowBound = vl_calloc (dimension, sizeof(double)) ;
for (i = 0 ; i < (unsigned)self->dimension ; ++i) { self->sigmaLowBound[i] = 1e-4 ; }
return self ;
}
/** @brief Reset state
** @param self object.
**
** The function resets the state of the GMM object. It deletes
** any stored posterior and other internal state variables.
**/
void
vl_gmm_reset (VlGMM * self)
{
if (self->posteriors) {
vl_free(self->posteriors) ;
self->posteriors = NULL ;
self->numData = 0 ;
}
if (self->kmeansInit && self->kmeansInitIsOwner) {
vl_kmeans_delete(self->kmeansInit) ;
self->kmeansInit = NULL ;
self->kmeansInitIsOwner = VL_FALSE ;
}
}
/** @brief Deletes a GMM object
** @param self GMM object instance.
**
** The function deletes the GMM object instance created
** by ::vl_gmm_new.
**/
void
vl_gmm_delete (VlGMM * self)
{
if(self->means) vl_free(self->means);
if(self->covariances) vl_free(self->covariances);
if(self->priors) vl_free(self->priors);
if(self->posteriors) vl_free(self->posteriors);
if(self->kmeansInit && self->kmeansInitIsOwner) {
vl_kmeans_delete(self->kmeansInit);
}
vl_free(self);
}
/* ---------------------------------------------------------------- */
/* Getters and setters */
/* ---------------------------------------------------------------- */
/** @brief Get data type
** @param self object
** @return data type.
**/
vl_type
vl_gmm_get_data_type (VlGMM const * self)
{
return self->dataType ;
}
/** @brief Get the number of clusters
** @param self object
** @return number of clusters.
**/
vl_size
vl_gmm_get_num_clusters (VlGMM const * self)
{
return self->numClusters ;
}
/** @brief Get the number of data points
** @param self object
** @return number of data points.
**/
vl_size
vl_gmm_get_num_data (VlGMM const * self)
{
return self->numData ;
}
/** @brief Get the log likelihood of the current mixture
** @param self object
** @return loglikelihood.
**/
double
vl_gmm_get_loglikelihood (VlGMM const * self)
{
return self->LL ;
}
/** @brief Get verbosity level
** @param self object
** @return verbosity level.
**/
int
vl_gmm_get_verbosity (VlGMM const * self)
{
return self->verbosity ;
}
/** @brief Set verbosity level
** @param self object
** @param verbosity verbosity level.
**/
void
vl_gmm_set_verbosity (VlGMM * self, int verbosity)
{
self->verbosity = verbosity ;
}
/** @brief Get means
** @param self object
** @return cluster means.
**/
void const *
vl_gmm_get_means (VlGMM const * self)
{
return self->means ;
}
/** @brief Get covariances
** @param self object
** @return diagonals of cluster covariance matrices.
**/
void const *
vl_gmm_get_covariances (VlGMM const * self)
{
return self->covariances ;
}
/** @brief Get priors
** @param self object
** @return priors of cluster gaussians.
**/
void const *
vl_gmm_get_priors (VlGMM const * self)
{
return self->priors ;
}
/** @brief Get posteriors
** @param self object
** @return posterior probabilities of cluster memberships.
**/
void const *
vl_gmm_get_posteriors (VlGMM const * self)
{
return self->posteriors ;
}
/** @brief Get maximum number of iterations
** @param self object
** @return maximum number of iterations.
**/
vl_size
vl_gmm_get_max_num_iterations (VlGMM const * self)
{
return self->maxNumIterations ;
}
/** @brief Set maximum number of iterations
** @param self VlGMM filter.
** @param maxNumIterations maximum number of iterations.
**/
void
vl_gmm_set_max_num_iterations (VlGMM * self, vl_size maxNumIterations)
{
self->maxNumIterations = maxNumIterations ;
}
/** @brief Get maximum number of repetitions.
** @param self object
** @return current number of repetitions for quantization.
**/
vl_size
vl_gmm_get_num_repetitions (VlGMM const * self)
{
return self->numRepetitions ;
}
/** @brief Set maximum number of repetitions
** @param self object
** @param numRepetitions maximum number of repetitions.
** The number of repetitions cannot be smaller than 1.
**/
void
vl_gmm_set_num_repetitions (VlGMM * self, vl_size numRepetitions)
{
assert (numRepetitions >= 1) ;
self->numRepetitions = numRepetitions ;
}
/** @brief Get data dimension
** @param self object
** @return data dimension.
**/
vl_size
vl_gmm_get_dimension (VlGMM const * self)
{
return self->dimension ;
}
/** @brief Get initialization algorithm
** @param self object
** @return initialization algorithm.
**/
VlGMMInitialization
vl_gmm_get_initialization (VlGMM const * self)
{
return self->initialization ;
}
/** @brief Set initialization algorithm.
** @param self object
** @param init initialization algorithm.
**/
void
vl_gmm_set_initialization (VlGMM * self, VlGMMInitialization init)
{
self->initialization = init;
}
/** @brief Get KMeans initialization object.
** @param self object
** @return kmeans initialization object.
**/
VlKMeans * vl_gmm_get_kmeans_init_object (VlGMM const * self)
{
return self->kmeansInit;
}
/** @brief Set KMeans initialization object.
** @param self object
** @param kmeans initialization KMeans object.
**/
void vl_gmm_set_kmeans_init_object (VlGMM * self, VlKMeans * kmeans)
{
if (self->kmeansInit && self->kmeansInitIsOwner) {
vl_kmeans_delete(self->kmeansInit) ;
}
self->kmeansInit = kmeans;
self->kmeansInitIsOwner = VL_FALSE;
}
/** @brief Get the lower bound on the diagonal covariance values.
** @param self object
** @return lower bound on covariances.
**/
double const * vl_gmm_get_covariance_lower_bounds (VlGMM const * self)
{
return self->sigmaLowBound;
}
/** @brief Set the lower bounds on diagonal covariance values.
** @param self object.
** @param bounds bounds.
**
** There is one lower bound per dimension. Use ::vl_gmm_set_covariance_lower_bound
** to set all of them to a given scalar.
**/
void vl_gmm_set_covariance_lower_bounds (VlGMM * self, double const * bounds)
{
memcpy(self->sigmaLowBound, bounds, sizeof(double) * self->dimension) ;
}
/** @brief Set the lower bounds on diagonal covariance values.
** @param self object.
** @param bound bound.
**
** While there is one lower bound per dimension, this function sets
** all of them to the specified scalar. Use ::vl_gmm_set_covariance_lower_bounds
** to set them individually.
**/
void vl_gmm_set_covariance_lower_bound (VlGMM * self, double bound)
{
int i ;
for (i = 0 ; i < (signed)self->dimension ; ++i) {
self->sigmaLowBound[i] = bound ;
}
}
/* ---------------------------------------------------------------- */
/* Instantiate shuffle algorithm */
#define VL_SHUFFLE_type vl_uindex
#define VL_SHUFFLE_prefix _vl_gmm
#include "shuffle-def.h"
/* #ifdef VL_GMM_INSTANTITATING */
#endif
/* ---------------------------------------------------------------- */
#ifdef VL_GMM_INSTANTIATING
/* ---------------------------------------------------------------- */
/* ---------------------------------------------------------------- */
/* Posterior assignments */
/* ---------------------------------------------------------------- */
/** @fn vl_get_gmm_data_posterior_f(float*,vl_size,vl_size,float const*,float const*,vl_size,float const*,float const*)
** @brief Get Gaussian modes posterior probabilities
** @param posteriors posterior probabilities (output).
** @param numClusters number of modes in the GMM model.
** @param numData number of data elements.
** @param priors prior mode probabilities of the GMM model.
** @param means means of the GMM model.
** @param dimension data dimension.
** @param covariances diagonal covariances of the GMM model.
** @param data data.
** @return data log-likelihood.
**
** This is a helper function that does not require a ::VlGMM object
** instance to operate.
**/
double
VL_XCAT(vl_get_gmm_data_posteriors_, SFX)
(TYPE * posteriors,
vl_size numClusters,
vl_size numData,
TYPE const * priors,
TYPE const * means,
vl_size dimension,
TYPE const * covariances,
TYPE const * data)
{
vl_index i_d, i_cl;
vl_size dim;
double LL = 0;
TYPE halfDimLog2Pi = (dimension / 2.0) * log(2.0*VL_PI);
TYPE * logCovariances ;
TYPE * logWeights ;
TYPE * invCovariances ;
#if (FLT == VL_TYPE_FLOAT)
VlFloatVector3ComparisonFunction distFn = vl_get_vector_3_comparison_function_f(VlDistanceMahalanobis) ;
#else
VlDoubleVector3ComparisonFunction distFn = vl_get_vector_3_comparison_function_d(VlDistanceMahalanobis) ;
#endif
logCovariances = vl_malloc(sizeof(TYPE) * numClusters) ;
invCovariances = vl_malloc(sizeof(TYPE) * numClusters * dimension) ;
logWeights = vl_malloc(sizeof(TYPE) * numClusters) ;
#if defined(_OPENMP)
#pragma omp parallel for private(i_cl,dim) num_threads(vl_get_max_threads())
#endif
for (i_cl = 0 ; i_cl < (signed)numClusters ; ++ i_cl) {
TYPE logSigma = 0 ;
if (priors[i_cl] < VL_GMM_MIN_PRIOR) {
logWeights[i_cl] = - (TYPE) VL_INFINITY_D ;
} else {
logWeights[i_cl] = log(priors[i_cl]);
}
for(dim = 0 ; dim < dimension ; ++ dim) {
logSigma += log(covariances[i_cl*dimension + dim]);
invCovariances [i_cl*dimension + dim] = (TYPE) 1.0 / covariances[i_cl*dimension + dim];
}
logCovariances[i_cl] = logSigma;
} /* end of parallel region */
#if defined(_OPENMP)
#pragma omp parallel for private(i_cl,i_d) reduction(+:LL) \
num_threads(vl_get_max_threads())
#endif
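/* For each datum the scores are computed in the log domain first; the
   maximum is subtracted before exponentiation (log-sum-exp) so that the
   normalization is numerically stable, and the log-likelihood is
   recovered as log(sum of shifted exponentials) + max. */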
for (i_d = 0 ; i_d < (signed)numData ; ++ i_d) {
TYPE clusterPosteriorsSum = 0;
TYPE maxPosterior = (TYPE)(-VL_INFINITY_D) ;
for (i_cl = 0 ; i_cl < (signed)numClusters ; ++ i_cl) {
TYPE p =
logWeights[i_cl]
- halfDimLog2Pi
- 0.5 * logCovariances[i_cl]
- 0.5 * distFn (dimension,
data + i_d * dimension,
means + i_cl * dimension,
invCovariances + i_cl * dimension) ;
posteriors[i_cl + i_d * numClusters] = p ;
if (p > maxPosterior) { maxPosterior = p ; }
}
for (i_cl = 0 ; i_cl < (signed)numClusters ; ++i_cl) {
TYPE p = posteriors[i_cl + i_d * numClusters] ;
p = exp(p - maxPosterior) ;
posteriors[i_cl + i_d * numClusters] = p ;
clusterPosteriorsSum += p ;
}
LL += log(clusterPosteriorsSum) + (double) maxPosterior ;
for (i_cl = 0 ; i_cl < (signed)numClusters ; ++i_cl) {
posteriors[i_cl + i_d * numClusters] /= clusterPosteriorsSum ;
}
} /* end of parallel region */
vl_free(logCovariances);
vl_free(logWeights);
vl_free(invCovariances);
return LL;
}
/* ---------------------------------------------------------------- */
/* Restarts zero-weighted Gaussians */
/* ---------------------------------------------------------------- */
static void
VL_XCAT(_vl_gmm_maximization_, SFX)
(VlGMM * self,
TYPE * posteriors,
TYPE * priors,
TYPE * covariances,
TYPE * means,
TYPE const * data,
vl_size numData) ;
static vl_size
VL_XCAT(_vl_gmm_restart_empty_modes_, SFX) (VlGMM * self, TYPE const * data)
{
vl_size dimension = self->dimension;
vl_size numClusters = self->numClusters;
vl_index i_cl, j_cl, i_d, d;
vl_size zeroWNum = 0;
TYPE * priors = (TYPE*)self->priors ;
TYPE * means = (TYPE*)self->means ;
TYPE * covariances = (TYPE*)self->covariances ;
TYPE * posteriors = (TYPE*)self->posteriors ;
//VlRand * rand = vl_get_rand() ;
TYPE * mass = vl_calloc(sizeof(TYPE), self->numClusters) ;
if (numClusters <= 1) { return 0 ; }
/* compute statistics */
{
vl_uindex i, k ;
vl_size numNullAssignments = 0 ;
for (i = 0 ; i < self->numData ; ++i) {
for (k = 0 ; k < self->numClusters ; ++k) {
TYPE p = ((TYPE*)self->posteriors)[k + i * self->numClusters] ;
mass[k] += p ;
if (p < VL_GMM_MIN_POSTERIOR) {
numNullAssignments ++ ;
}
}
}
if (self->verbosity) {
VL_PRINTF("gmm: sparsity of data posterior: %.1f%%\n", (double)numNullAssignments / (self->numData * self->numClusters) * 100) ;
}
}
#if 0
/* search for cluster with negligible weight and reassign them to fat clusters */
for (i_cl = 0 ; i_cl < numClusters ; ++i_cl) {
if (priors[i_cl] < 0.00001/numClusters) {
double mass = priors[0] ;
vl_index best = 0 ;
for (j_cl = 1 ; j_cl < numClusters ; ++j_cl) {
if (priors[j_cl] > mass) { mass = priors[j_cl] ; best = j_cl ; }
}
if (j_cl == i_cl) {
/* this should never happen */
continue ;
}
j_cl = best ;
zeroWNum ++ ;
VL_PRINTF("gmm: restarting mode %d by splitting mode %d (with prior %f)\n", i_cl,j_cl,mass) ;
priors[i_cl] = mass/2 ;
priors[j_cl] = mass/2 ;
for (d = 0 ; d < dimension ; ++d) {
TYPE sigma2 = covariances[j_cl*dimension + d] ;
TYPE sigma = VL_XCAT(vl_sqrt_,SFX)(sigma2) ;
means[i_cl*dimension + d] = means[j_cl*dimension + d] + 0.001 * (vl_rand_real1(rand) - 0.5) * sigma ;
covariances[i_cl*dimension + d] = sigma2 ;
}
}
}
#endif
/* search for clusters with negligible mass and restart them by splitting a heavy cluster */
for (i_cl = 0 ; i_cl < (signed)numClusters ; ++i_cl) {
double size = - VL_INFINITY_D ;
vl_index best = -1 ;
if (mass[i_cl] >= VL_GMM_MIN_POSTERIOR *
VL_MAX(1.0, (double) self->numData / self->numClusters))
{
continue ;
}
if (self->verbosity) {
VL_PRINTF("gmm: mode %d is nearly empty (mass %f)\n", i_cl, mass[i_cl]) ;
}
/*
Search for the Gaussian component that (approximately) contributes the
most to making the negative log-likelihood of the data large. Then
split that worst offender.
To do so, we approximate the expected negative log-likelihood of the GMM:
E[-log f(x)] = H(f) = - \int f(x) log f(x) dx
where the density f(x) = sum_k pk gk(x) is a GMM. This is intractable,
but it is easy to approximate if we suppose that supp gk is disjoint from
supp gq for all components k ~= q. In this case
H(f) ~= sum_k [ - pk log(pk) + pk H(gk) ]
where H(gk) is the entropy of component k taken alone. The entropy of
the latter is given by:
H(gk) = D/2 (1 + log(2*pi)) + 1/2 sum_{i=1}^D log sigma_i^2
*/
for (j_cl = 0 ; j_cl < (signed)numClusters ; ++j_cl) {
double size_ ;
if (priors[j_cl] < VL_GMM_MIN_PRIOR) { continue ; }
size_ = + 0.5 * dimension * (1.0 + log(2*VL_PI)) ;
for(d = 0 ; d < (signed)dimension ; d++) {
double sigma2 = covariances[j_cl * dimension + d] ;
size_ += 0.5 * log(sigma2) ;
}
size_ = priors[j_cl] * (size_ - log(priors[j_cl])) ;
if (self->verbosity > 1) {
VL_PRINTF("gmm: mode %d: prior %f, mass %f, entropy contribution %f\n",
j_cl, priors[j_cl], mass[j_cl], size_) ;
}
if (size_ > size) {
size = size_ ;
best = j_cl ;
}
}
j_cl = best ;
if (j_cl == i_cl || j_cl < 0) {
if (self->verbosity) {
VL_PRINTF("gmm: mode %d is empty, "
"but no other mode to split could be found\n", i_cl) ;
}
continue ;
}
if (self->verbosity) {
VL_PRINTF("gmm: reinitializing empty mode %d with mode %d (prior %f, mass %f, score %f)\n",
i_cl, j_cl, priors[j_cl], mass[j_cl], size) ;
}
/*
Search for the dimension with maximum variance.
*/
size = - VL_INFINITY_D ;
best = - 1 ;
for(d = 0; d < (signed)dimension; d++) {
double sigma2 = covariances[j_cl * dimension + d] ;
if (sigma2 > size) {
size = sigma2 ;
best = d ;
}
}
/*
Reassign points j_cl (mode to split) to i_cl (empty mode).
*/
{
TYPE mu = means[best + j_cl * self->dimension] ;
for(i_d = 0 ; i_d < (signed)self->numData ; ++ i_d) {
TYPE p = posteriors[j_cl + self->numClusters * i_d] ;
TYPE q = posteriors[i_cl + self->numClusters * i_d] ; /* ~= 0 */
if (data[best + i_d * self->dimension] < mu) {
/* assign this point to i_cl */
posteriors[i_cl + self->numClusters * i_d] = p + q ;
posteriors[j_cl + self->numClusters * i_d] = 0 ;
} else {
/* assign this point to j_cl */
posteriors[i_cl + self->numClusters * i_d] = 0 ;
posteriors[j_cl + self->numClusters * i_d] = p + q ;
}
}
}
/*
Re-estimate.
*/
VL_XCAT(_vl_gmm_maximization_, SFX)
(self,posteriors,priors,covariances,means,data,self->numData) ;
}
return zeroWNum;
}
/* ---------------------------------------------------------------- */
/* Helpers */
/* ---------------------------------------------------------------- */
static void
VL_XCAT(_vl_gmm_apply_bounds_, SFX)(VlGMM * self)
{
vl_uindex dim ;
vl_uindex k ;
vl_size numAdjusted = 0 ;
TYPE * cov = (TYPE*)self->covariances ;
double const * lbs = self->sigmaLowBound ;
for (k = 0 ; k < self->numClusters ; ++k) {
vl_bool adjusted = VL_FALSE ;
for (dim = 0 ; dim < self->dimension ; ++dim) {
if (cov[k * self->dimension + dim] < lbs[dim] ) {
cov[k * self->dimension + dim] = lbs[dim] ;
adjusted = VL_TRUE ;
}
}
if (adjusted) { numAdjusted ++ ; }
}
if (numAdjusted > 0 && self->verbosity > 0) {
VL_PRINT("gmm: detected %d of %d modes with at least one dimension "
"with covariance too small (set to lower bound)\n",
numAdjusted, self->numClusters) ;
}
}
/* ---------------------------------------------------------------- */
/* EM - Maximization step */
/* ---------------------------------------------------------------- */
static void
VL_XCAT(_vl_gmm_maximization_, SFX)
(VlGMM * self,
TYPE * posteriors,
TYPE * priors,
TYPE * covariances,
TYPE * means,
TYPE const * data,
vl_size numData)
{
vl_size numClusters = self->numClusters;
vl_index i_d, i_cl;
vl_size dim ;
TYPE * oldMeans ;
double time = 0 ;
if (self->verbosity > 1) {
VL_PRINTF("gmm: em: entering maximization step\n") ;
time = vl_get_cpu_time() ;
}
oldMeans = vl_malloc(sizeof(TYPE) * self->dimension * numClusters) ;
memcpy(oldMeans, means, sizeof(TYPE) * self->dimension * numClusters) ;
memset(priors, 0, sizeof(TYPE) * numClusters) ;
memset(means, 0, sizeof(TYPE) * self->dimension * numClusters) ;
memset(covariances, 0, sizeof(TYPE) * self->dimension * numClusters) ;
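/* Each thread accumulates into private buffers and the partial sums are
   merged into the shared arrays in the critical section at the end. The
   buffers are also allocated inside a critical section, as the allocator
   is not assumed to be thread-safe. */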
#if defined(_OPENMP)
#pragma omp parallel default(shared) private(i_d, i_cl, dim) \
num_threads(vl_get_max_threads())
#endif
{
TYPE * clusterPosteriorSum_, * means_, * covariances_ ;
#if defined(_OPENMP)
#pragma omp critical
#endif
{
clusterPosteriorSum_ = vl_calloc(sizeof(TYPE), numClusters) ;
means_ = vl_calloc(sizeof(TYPE), self->dimension * numClusters) ;
covariances_ = vl_calloc(sizeof(TYPE), self->dimension * numClusters) ;
}
/*
Accumulate weighted sums and sum of square differences. Once normalized,
these become the means and covariances of each Gaussian mode.
The squared differences will be taken w.r.t. the old means however. In this manner,
one avoids doing two passes across the data. Eventually, these are corrected to account
for the new means properly. In principle, one could set the old means to zero, but
this may cause numerical instabilities (by accumulating large squares).
*/
#if defined(_OPENMP)
#pragma omp for
#endif
for (i_d = 0 ; i_d < (signed)numData ; ++i_d) {
for (i_cl = 0 ; i_cl < (signed)numClusters ; ++i_cl) {
TYPE p = posteriors[i_cl + i_d * self->numClusters] ;
vl_bool calculated = VL_FALSE ;
/* skip very small associations for speed */
if (p < VL_GMM_MIN_POSTERIOR / numClusters) { continue ; }
clusterPosteriorSum_ [i_cl] += p ;
#ifndef VL_DISABLE_AVX
if (vl_get_simd_enabled() && vl_cpu_has_avx()) {
VL_XCAT(_vl_weighted_mean_avx_, SFX)
(self->dimension,
means_+ i_cl * self->dimension,
data + i_d * self->dimension,
p) ;
VL_XCAT(_vl_weighted_sigma_avx_, SFX)
(self->dimension,
covariances_ + i_cl * self->dimension,
data + i_d * self->dimension,
oldMeans + i_cl * self->dimension,
p) ;
calculated = VL_TRUE;
}
#endif
#ifndef VL_DISABLE_SSE2
if (vl_get_simd_enabled() && vl_cpu_has_sse2() && !calculated) {
VL_XCAT(_vl_weighted_mean_sse2_, SFX)
(self->dimension,
means_+ i_cl * self->dimension,
data + i_d * self->dimension,
p) ;
VL_XCAT(_vl_weighted_sigma_sse2_, SFX)
(self->dimension,
covariances_ + i_cl * self->dimension,
data + i_d * self->dimension,
oldMeans + i_cl * self->dimension,
p) ;
calculated = VL_TRUE;
}
#endif
if(!calculated) {
for (dim = 0 ; dim < self->dimension ; ++dim) {
TYPE x = data[i_d * self->dimension + dim] ;
TYPE mu = oldMeans[i_cl * self->dimension + dim] ;
TYPE diff = x - mu ;
means_ [i_cl * self->dimension + dim] += p * x ;
covariances_ [i_cl * self->dimension + dim] += p * (diff*diff) ;
}
}
}
}
/* accumulate */
#if defined(_OPENMP)
#pragma omp critical
#endif
{
for (i_cl = 0 ; i_cl < (signed)numClusters ; ++i_cl) {
priors [i_cl] += clusterPosteriorSum_ [i_cl];
for (dim = 0 ; dim < self->dimension ; ++dim) {
means [i_cl * self->dimension + dim] += means_ [i_cl * self->dimension + dim] ;
covariances [i_cl * self->dimension + dim] += covariances_ [i_cl * self->dimension + dim] ;
}
}
vl_free(means_);
vl_free(covariances_);
vl_free(clusterPosteriorSum_);
}
} /* parallel section */
/* at this stage priors[] contains the total mass of each cluster */
for (i_cl = 0 ; i_cl < (signed)numClusters ; ++ i_cl) {
TYPE mass = priors[i_cl] ;
/* do not update modes that do not receive any mass */
if (mass >= 1e-6 / numClusters) {
for (dim = 0 ; dim < self->dimension ; ++dim) {
means[i_cl * self->dimension + dim] /= mass ;
covariances[i_cl * self->dimension + dim] /= mass ;
}
}
}
/* apply old to new means correction */
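/* This uses the identity
   sum_i p_i (x_i - muOld)^2 / mass = sum_i p_i (x_i - muNew)^2 / mass + (muNew - muOld)^2,
   so subtracting (muNew - muOld)^2 turns the variance about the old mean
   into the variance about the new one. */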
for (i_cl = 0 ; i_cl < (signed)numClusters ; ++ i_cl) {
TYPE mass = priors[i_cl] ;
if (mass >= 1e-6 / numClusters) {
for (dim = 0 ; dim < self->dimension ; ++dim) {
TYPE mu = means[i_cl * self->dimension + dim] ;
TYPE oldMu = oldMeans[i_cl * self->dimension + dim] ;
TYPE diff = mu - oldMu ;
covariances[i_cl * self->dimension + dim] -= diff * diff ;
}
}
}
VL_XCAT(_vl_gmm_apply_bounds_,SFX)(self) ;
{
TYPE sum = 0;
for (i_cl = 0 ; i_cl < (signed)numClusters ; ++i_cl) {
sum += priors[i_cl] ;
}
sum = VL_MAX(sum, 1e-12) ;
for (i_cl = 0 ; i_cl < (signed)numClusters ; ++i_cl) {
priors[i_cl] /= sum ;
}
}
if (self->verbosity > 1) {
VL_PRINTF("gmm: em: maximization step completed in %.2f s\n",
vl_get_cpu_time() - time) ;
}
vl_free(oldMeans);
}
/* ---------------------------------------------------------------- */
/* EM iterations */
/* ---------------------------------------------------------------- */
static double
VL_XCAT(_vl_gmm_em_, SFX)
(VlGMM * self,
TYPE const * data,
vl_size numData)
{
vl_size iteration, restarted ;
double previousLL = (TYPE)(-VL_INFINITY_D) ;
double LL = (TYPE)(-VL_INFINITY_D) ;
double time = 0 ;
_vl_gmm_prepare_for_data (self, numData) ;
VL_XCAT(_vl_gmm_apply_bounds_,SFX)(self) ;
for (iteration = 0 ; 1 ; ++ iteration) {
double eps ;
/*
Expectation: assign data to Gaussian modes
and compute log-likelihood.
*/
if (self->verbosity > 1) {
VL_PRINTF("gmm: em: entering expectation step\n") ;
time = vl_get_cpu_time() ;
}
LL = VL_XCAT(vl_get_gmm_data_posteriors_,SFX)
(self->posteriors,
self->numClusters,
numData,
self->priors,
self->means,
self->dimension,
self->covariances,
data) ;
if (self->verbosity > 1) {
VL_PRINTF("gmm: em: expectation step completed in %.2f s\n",
vl_get_cpu_time() - time) ;
}
/*
Check the termination conditions.
*/
if (self->verbosity) {
VL_PRINTF("gmm: em: iteration %d: loglikelihood = %f (variation = %f)\n",
iteration, LL, LL - previousLL) ;
}
if (iteration >= self->maxNumIterations) {
if (self->verbosity) {
VL_PRINTF("gmm: em: terminating because "
"the maximum number of iterations "
"(%d) has been reached.\n", self->maxNumIterations) ;
}
break ;
}
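/* Relative improvement of the log-likelihood; terminate once it drops
   below 1e-5 (checked from the second iteration onwards). */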
eps = vl_abs_d ((LL - previousLL) / (LL));
if ((iteration > 0) && (eps < 0.00001)) {
if (self->verbosity) {
VL_PRINTF("gmm: em: terminating because the algorithm "
"fully converged (log-likelihood variation = %f).\n", eps) ;
}
break ;
}
previousLL = LL ;
/*
Restart empty modes.
*/
if (iteration > 1) {
restarted = VL_XCAT(_vl_gmm_restart_empty_modes_, SFX)
(self, data);
if ((restarted > 0) && (self->verbosity > 0)) {
VL_PRINTF("gmm: em: %d Gaussian modes restarted because "
"they had become empty.\n", restarted);
}
}
/*
Maximization: reestimate the GMM parameters.
*/
VL_XCAT(_vl_gmm_maximization_, SFX)
(self,self->posteriors,self->priors,self->covariances,self->means,data,numData) ;
}
return LL;
}
/* ---------------------------------------------------------------- */
/* Kmeans initialization of mixtures */
/* ---------------------------------------------------------------- */
static void
VL_XCAT(_vl_gmm_init_with_kmeans_, SFX)
(VlGMM * self,
TYPE const * data,
vl_size numData,
VlKMeans * kmeansInit)
{
vl_size i_d ;
vl_uint32 * assignments = vl_malloc(sizeof(vl_uint32) * numData);
_vl_gmm_prepare_for_data (self, numData) ;
memset(self->means,0,sizeof(TYPE) * self->numClusters * self->dimension) ;
memset(self->priors,0,sizeof(TYPE) * self->numClusters) ;
memset(self->covariances,0,sizeof(TYPE) * self->numClusters * self->dimension) ;
memset(self->posteriors,0,sizeof(TYPE) * self->numClusters * numData) ;
/* set up the specified KMeans initialization object, if any */
if (kmeansInit) { vl_gmm_set_kmeans_init_object (self, kmeansInit) ; }
/* if a KMeans initialization object is still unavailable, create a default one */
if(self->kmeansInit == NULL) {
vl_size ncomparisons = VL_MAX(numData / 4, 10) ;
vl_size niter = 5 ;
vl_size ntrees = 1 ;
vl_size nrepetitions = 1 ;
VlKMeansAlgorithm algorithm = VlKMeansANN ;
VlKMeansInitialization initialization = VlKMeansRandomSelection ;
VlKMeans * kmeansInitDefault = vl_kmeans_new(self->dataType,VlDistanceL2) ;
vl_kmeans_set_initialization(kmeansInitDefault, initialization);
vl_kmeans_set_max_num_iterations (kmeansInitDefault, niter) ;
vl_kmeans_set_max_num_comparisons (kmeansInitDefault, ncomparisons) ;
vl_kmeans_set_num_trees (kmeansInitDefault, ntrees);
vl_kmeans_set_algorithm (kmeansInitDefault, algorithm);
vl_kmeans_set_num_repetitions(kmeansInitDefault, nrepetitions);
vl_kmeans_set_verbosity (kmeansInitDefault, self->verbosity);
self->kmeansInit = kmeansInitDefault;
self->kmeansInitIsOwner = VL_TRUE ;
}
/* Use k-means to assign data to clusters */
vl_kmeans_cluster (self->kmeansInit, data, self->dimension, numData, self->numClusters);
vl_kmeans_quantize (self->kmeansInit, assignments, NULL, data, numData) ;
/* Turn the k-means assignments into one-hot posteriors and estimate the mode parameters */
for(i_d = 0; i_d < numData; i_d++) {
((TYPE*)self->posteriors)[assignments[i_d] + i_d * self->numClusters] = (TYPE) 1.0 ;
}
/* Update cluster parameters */
VL_XCAT(_vl_gmm_maximization_, SFX)
(self,self->posteriors,self->priors,self->covariances,self->means,data,numData);
vl_free(assignments) ;
}
/* ---------------------------------------------------------------- */
/* Random initialization of mixtures */
/* ---------------------------------------------------------------- */
static void
VL_XCAT(_vl_gmm_compute_init_sigma_, SFX)
(VlGMM * self,
TYPE const * data,
TYPE * initSigma,
vl_size dimension,
vl_size numData)
{
vl_size dim;
vl_uindex i;
TYPE * dataMean ;
memset(initSigma,0,sizeof(TYPE)*dimension) ;
if (numData <= 1) return ;
dataMean = vl_malloc(sizeof(TYPE)*dimension);
memset(dataMean,0,sizeof(TYPE)*dimension) ;
/* find mean of the whole dataset */
for(dim = 0 ; dim < dimension ; dim++) {
for(i = 0 ; i < numData ; i++) {
dataMean[dim] += data[i*dimension + dim];
}
dataMean[dim] /= numData;
}
/* compute the unbiased variance of the whole dataset */
for(dim = 0; dim < dimension; dim++) {
for(i = 0; i < numData; i++) {
TYPE diff = (data[i*self->dimension + dim] - dataMean[dim]) ;
initSigma[dim] += diff*diff ;
}
initSigma[dim] /= numData - 1 ;
}
vl_free(dataMean) ;
}
static void
VL_XCAT(_vl_gmm_init_with_rand_data_, SFX)
(VlGMM * self,
TYPE const * data,
vl_size numData)
{
vl_uindex i, k, dim ;
VlKMeans * kmeans ;
_vl_gmm_prepare_for_data(self, numData) ;
/* initialize the Gaussian priors to be equal and sum to one */
for (i = 0 ; i < self->numClusters ; ++i) { ((TYPE*)self->priors)[i] = (TYPE) (1.0 / self->numClusters) ; }
/* initialize diagonals of covariance matrices to data covariance */
VL_XCAT(_vl_gmm_compute_init_sigma_, SFX) (self, data, self->covariances, self->dimension, numData);
for (k = 1 ; k < self->numClusters ; ++ k) {
for(dim = 0; dim < self->dimension; dim++) {
*((TYPE*)self->covariances + k * self->dimension + dim) =
*((TYPE*)self->covariances + dim) ;
}
}
/* use kmeans++ seeding to pick well-spread data points as the initial means */
kmeans = vl_kmeans_new(self->dataType,VlDistanceL2) ;
vl_kmeans_init_centers_plus_plus(kmeans, data, self->dimension, numData, self->numClusters) ;
memcpy(self->means, vl_kmeans_get_centers(kmeans), sizeof(TYPE) * self->dimension * self->numClusters) ;
vl_kmeans_delete(kmeans) ;
}
/* ---------------------------------------------------------------- */
#else /* VL_GMM_INSTANTIATING */
/* ---------------------------------------------------------------- */
#ifndef __DOXYGEN__
#define FLT VL_TYPE_FLOAT
#define TYPE float
#define SFX f
#define VL_GMM_INSTANTIATING
#include "gmm.c"
#define FLT VL_TYPE_DOUBLE
#define TYPE double
#define SFX d
#define VL_GMM_INSTANTIATING
#include "gmm.c"
#endif
/* VL_GMM_INSTANTIATING */
#endif
/* ---------------------------------------------------------------- */
#ifndef VL_GMM_INSTANTIATING
/* ---------------------------------------------------------------- */
/** @brief Create a new GMM object by copy
** @param self object.
** @return new copy.
**
** Most parameters, including the cluster priors, means, and
** covariances are copied. Data posteriors (available after
** initialization or EM) are not; nor is the KMeans object used for
** initialization, if any.
**/
VlGMM *
vl_gmm_new_copy (VlGMM const * self)
{
vl_size size = vl_get_type_size(self->dataType) ;
VlGMM * gmm = vl_gmm_new(self->dataType, self->dimension, self->numClusters);
gmm->initialization = self->initialization;
gmm->maxNumIterations = self->maxNumIterations;
gmm->numRepetitions = self->numRepetitions;
gmm->verbosity = self->verbosity;
gmm->LL = self->LL;
memcpy(gmm->means, self->means, size*self->numClusters*self->dimension);
memcpy(gmm->covariances, self->covariances, size*self->numClusters*self->dimension);
memcpy(gmm->priors, self->priors, size*self->numClusters);
return gmm ;
}
/** @brief Initialize mixture before EM takes place using random initialization
** @param self GMM object instance.
** @param data data points which should be clustered.
** @param numData number of data points.
**/
void
vl_gmm_init_with_rand_data
(VlGMM * self,
void const * data,
vl_size numData)
{
vl_gmm_reset (self) ;
switch (self->dataType) {
case VL_TYPE_FLOAT : _vl_gmm_init_with_rand_data_f (self, (float const *)data, numData) ; break ;
case VL_TYPE_DOUBLE : _vl_gmm_init_with_rand_data_d (self, (double const *)data, numData) ; break ;
default:
abort() ;
}
}
/** @brief Initialize mixture before EM takes place using KMeans
** @param self GMM object instance.
** @param data data points which should be clustered.
** @param numData number of data points.
** @param kmeansInit KMeans object to use.
**/
void
vl_gmm_init_with_kmeans
(VlGMM * self,
void const * data,
vl_size numData,
VlKMeans * kmeansInit)
{
vl_gmm_reset (self) ;
switch (self->dataType) {
case VL_TYPE_FLOAT :
_vl_gmm_init_with_kmeans_f
(self, (float const *)data, numData, kmeansInit) ;
break ;
case VL_TYPE_DOUBLE :
_vl_gmm_init_with_kmeans_d
(self, (double const *)data, numData, kmeansInit) ;
break ;
default:
abort() ;
}
}
#if 0
#include<fenv.h>
#endif
/** @brief Run GMM clustering - includes initialization and EM
** @param self GMM object instance.
** @param data data points which should be clustered.
** @param numData number of data points.
**/
double vl_gmm_cluster (VlGMM * self,
void const * data,
vl_size numData)
{
void * bestPriors = NULL ;
void * bestMeans = NULL;
void * bestCovariances = NULL;
void * bestPosteriors = NULL;
vl_size size = vl_get_type_size(self->dataType) ;
double bestLL = -VL_INFINITY_D;
vl_uindex repetition;
assert(self->numRepetitions >=1) ;
bestPriors = vl_malloc(size * self->numClusters) ;
bestMeans = vl_malloc(size * self->dimension * self->numClusters) ;
bestCovariances = vl_malloc(size * self->dimension * self->numClusters) ;
bestPosteriors = vl_malloc(size * self->numClusters * numData) ;
#if 0
feenableexcept(FE_DIVBYZERO | FE_INVALID | FE_OVERFLOW);
#endif
for (repetition = 0 ; repetition < self->numRepetitions ; ++ repetition) {
double LL ;
double timeRef ;
if (self->verbosity) {
VL_PRINTF("gmm: clustering: starting repetition %d of %d\n", repetition + 1, self->numRepetitions) ;
}
/* initialize a new mixture model */
timeRef = vl_get_cpu_time() ;
switch (self->initialization) {
case VlGMMKMeans : vl_gmm_init_with_kmeans (self, data, numData, NULL) ; break ;
case VlGMMRand : vl_gmm_init_with_rand_data (self, data, numData) ; break ;
case VlGMMCustom : break ;
default: abort() ;
}
if (self->verbosity) {
VL_PRINTF("gmm: model initialized in %.2f s\n",
vl_get_cpu_time() - timeRef) ;
}
/* fit the model to data by running EM */
timeRef = vl_get_cpu_time () ;
LL = vl_gmm_em (self, data, numData) ;
if (self->verbosity) {
VL_PRINTF("gmm: optimization terminated in %.2f s with loglikelihood %f\n",
vl_get_cpu_time() - timeRef, LL) ;
}
if (LL > bestLL || repetition == 0) {
void * temp ;
temp = bestPriors ;
bestPriors = self->priors ;
self->priors = temp ;
temp = bestMeans ;
bestMeans = self->means ;
self->means = temp ;
temp = bestCovariances ;
bestCovariances = self->covariances ;
self->covariances = temp ;
temp = bestPosteriors ;
bestPosteriors = self->posteriors ;
self->posteriors = temp ;
bestLL = LL;
}
}
vl_free (self->priors) ;
vl_free (self->means) ;
vl_free (self->covariances) ;
vl_free (self->posteriors) ;
self->priors = bestPriors ;
self->means = bestMeans ;
self->covariances = bestCovariances ;
self->posteriors = bestPosteriors ;
self->LL = bestLL;
if (self->verbosity) {
VL_PRINTF("gmm: all repetitions terminated with final loglikelihood %f\n", self->LL) ;
}
return bestLL ;
}
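/* A minimal usage sketch (disabled). vl_gmm_get_means() and vl_gmm_delete()
   are assumed from the public VLFeat API; they are not defined in this file. */
#if 0
{
  vl_size dimension = 2, numClusters = 4, numData = 1000 ;
  float * data = vl_malloc(sizeof(float) * numData * dimension) ;
  VlGMM * gmm ;
  double LL ;
  /* fill data with numData points of dimension values each, row major */
  gmm = vl_gmm_new (VL_TYPE_FLOAT, dimension, numClusters) ;
  LL = vl_gmm_cluster (gmm, data, numData) ;
  VL_PRINTF("loglikelihood %f\n", LL) ;
  /* the fitted parameters can now be read back, e.g.:
     float const * means = vl_gmm_get_means (gmm) ; */
  vl_gmm_delete (gmm) ;
  vl_free (data) ;
}
#endif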
/** @brief Invoke the EM algorithm.
** @param self GMM object instance.
** @param data data points which should be clustered.
** @param numData number of data points.
**/
double vl_gmm_em (VlGMM * self, void const * data, vl_size numData)
{
switch (self->dataType) {
case VL_TYPE_FLOAT:
return _vl_gmm_em_f (self, (float const *)data, numData) ; break ;
case VL_TYPE_DOUBLE:
return _vl_gmm_em_d (self, (double const *)data, numData) ; break ;
default:
abort() ;
}
return 0 ;
}
/** @brief Explicitly set the initial means for EM.
** @param self GMM object instance.
** @param means initial values of means.
**/
void
vl_gmm_set_means (VlGMM * self, void const * means)
{
memcpy(self->means,means,
self->dimension * self->numClusters * vl_get_type_size(self->dataType));
}
/** @brief Explicitly set the initial sigma diagonals for EM.
** @param self GMM object instance.
** @param covariances initial values of covariance matrix diagonals.
**/
void vl_gmm_set_covariances (VlGMM * self, void const * covariances)
{
memcpy(self->covariances,covariances,
self->dimension * self->numClusters * vl_get_type_size(self->dataType));
}
/** @brief Explicitly set the initial priors of the gaussians.
** @param self GMM object instance.
** @param priors initial values of the gaussian priors.
**/
void vl_gmm_set_priors (VlGMM * self, void const * priors)
{
memcpy(self->priors,priors,
self->numClusters * vl_get_type_size(self->dataType));
}
/* VL_GMM_INSTANTIATING */
#endif
#undef SFX
#undef TYPE
#undef FLT
#undef VL_GMM_INSTANTIATING
|
fdtd-2d.pluto.par.c | #include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <math.h>
#include <omp.h>
#define tmax T
#define nx N
#define ny N
double ex[nx][ny +1];
double ey[nx +1][ny];
double hz[nx][ny];
void init_arrays()
{
int i, j;
for (i=0; i<nx+1; i++) {
for (j=0; j<ny; j++) {
ey[i][j] = 0;
}
}
for (i=0; i<nx; i++) {
for (j=0; j<ny+1; j++) {
ex[i][j] = 0;
}
}
for (j=0; j<ny; j++) {
ey[0][j] = ((double)j)/ny;
}
for (i=0; i<nx; i++) {
for (j=0; j<ny; j++) {
hz[i][j] = 0;
}
}
}
double rtclock()
{
struct timezone tzp;
struct timeval tp;
gettimeofday (&tp, &tzp);
return (tp.tv_sec + tp.tv_usec*1.0e-6);
}
double annot_t_start=0, annot_t_end=0, annot_t_total=0;
int main()
{
init_arrays();
int annot_i;
annot_t_start = rtclock();
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
#define S1(zT0,zT1,zT2,zT3,t,j) {ey[0][j]=t;}
#define S2(zT0,zT1,zT2,zT3,zT4,zT5,t,i,j) {ey[i][j]=ey[i][j]-((double)(1))/2*(hz[i][j]-hz[i-1][j]);}
#define S3(zT0,zT1,zT2,zT3,zT4,zT5,t,i,j) {ex[i][j]=ex[i][j]-((double)(1))/2*(hz[i][j]-hz[i][j-1]);}
#define S4(zT0,zT1,zT2,zT3,zT4,zT5,t,i,j) {hz[i][j]=hz[i][j]-((double)(7))/10*(ey[1+i][j]+ex[i][1+j]-ex[i][j]-ey[i][j]);}
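/* 2-D FDTD stencil: S2 updates ey from the x-difference of hz, S3 updates
   ex from the y-difference of hz, S4 updates hz from the discrete curl of
   (ex, ey), and S1 imposes the time-dependent boundary source ey[0][j] = t. */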
int c1, c2, c3, c4, c5, c6, c7, c8, c9;
register int lb, ub, lb1, ub1, lb2, ub2;
register int lbv, ubv;
/* Generated from PLuTo-produced CLooG file by CLooG v0.14.1 64 bits in 18.06s. */
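/* The nest below is PLuTo's time-tiled wavefront schedule: c1 enumerates
   wavefronts of 256x256 tiles, tiles on the same wavefront (loop c2) are
   independent and run in parallel under OpenMP, c3-c6 scan 32-wide
   sub-tiles, and c7-c9 visit the individual time steps and grid points. */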
for (c1=-1;c1<=floord(2*tmax+ny-2,256);c1++) {
lb1=max(max(ceild(256*c1-tmax+1,256),ceild(256*c1-255,512)),0);
ub1=min(min(floord(tmax+ny-1,256),floord(256*c1+255,256)),floord(256*c1+ny+255,512));
#pragma omp parallel for shared(c1,lb1,ub1) private(c2,c3,c4,c5,c6,c7,c8,c9)
for (c2=lb1; c2<=ub1; c2++) {
for (c3=max(max(max(max(ceild(256*c2-ny-254,256),0),ceild(512*c1-768*c2-509,256)),ceild(256*c1-65536*c2-129795,65280)),ceild(256*c1-256*c2-255,256));c3<=min(min(floord(256*c2+nx+254,256),floord(tmax+nx-1,256)),floord(256*c1-256*c2+nx+255,256));c3++) {
for (c4=max(max(max(max(0,ceild(256*c3-nx-31,32)),ceild(256*c2-ny-31,32)),8*c1-8*c2),8*c1-1800*c2-1792*c3-3563);c4<=min(min(min(min(min(8*c1-8*c2+7,floord(256*c2+255,32)),floord(256*c2+256*c3+509,64)),floord(tmax-1,32)),floord(256*c3+255,32)),floord(7936*c2+7936*c3+15779,32));c4++) {
for (c5=max(max(max(max(max(max(8*c2,0),ceild(-7936*c3+32*c4-8835,992)),ceild(8*c1-8*c2-1792*c3-c4-1995,224)),ceild(-256*c3+64*c4-285,32)),ceild(32*c4-31,32)),ceild(256*c3-nx-30,32));c5<=min(min(min(8*c2+7,floord(256*c3+ny+254,32)),floord(tmax+ny-1,32)),floord(32*c4+ny+31,32));c5++) {
for (c6=max(max(max(max(max(max(max(8*c3,ceild(32*c5-ny-30,32)),0),ceild(32*c4-992*c5-1891,992)),ceild(8*c1-8*c2-8*c3-c4-224*c5-434,223)),ceild(-8*c1+8*c2+8*c3+c4-224*c5-434,225)),ceild(64*c4-32*c5-61,32)),ceild(32*c4-31,32));c6<=min(min(min(floord(tmax+nx-1,32),8*c3+7),floord(32*c4+nx+31,32)),floord(32*c5+nx+30,32));c6++) {
if ((c4 <= floord(32*c5-ny,32)) && (c5 >= max(ceild(32*c6-nx+ny,32),ceild(ny,32)))) {
for (c9=max(32*c5-ny+1,32*c6);c9<=min(32*c5+nx-ny,32*c6+31);c9++) {
S4(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6,-c4+c5,32*c5-ny,-32*c5+c9+ny-1,ny-1) ;
}
}
if ((c4 <= floord(32*c6-nx,32)) && (c5 <= floord(32*c6-nx+ny-1,32)) && (c6 >= ceild(nx,32))) {
for (c8=max(32*c5,32*c6-nx+1);c8<=min(32*c5+31,32*c6-nx+ny);c8++) {
S4(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6,-c4+c5,32*c6-nx,nx-1,-32*c6+c8+nx-1) ;
}
}
if ((c1 == c2+c3) && (c4 == c6)) {
for (c7=max(max(32*c6,0),32*c5-ny+1);c7<=min(min(32*c5-ny+31,tmax-1),32*c6+30);c7++) {
for (c8=32*c5;c8<=c7+ny-1;c8++) {
S1(c1-c2,-c1+2*c2,c4,-c4+c5,c7,-c7+c8) ;
S3(c1-c2,0,-c1+2*c2,c4,0,-c4+c5,c7,0,-c7+c8) ;
for (c9=c7+1;c9<=32*c6+31;c9++) {
S2(c1-c2,0,-c1+2*c2,c4,0,-c4+c5,c7,-c7+c9,-c7+c8) ;
S3(c1-c2,0,-c1+2*c2,c4,0,-c4+c5,c7,-c7+c9,-c7+c8) ;
S4(c1-c2,0,-c1+2*c2,c4,0,-c4+c5,c7,-c7+c9-1,-c7+c8-1) ;
}
}
for (c9=c7+1;c9<=32*c6+31;c9++) {
S4(c1-c2,0,-c1+2*c2,c4,0,-c4+c5,c7,-c7+c9-1,ny-1) ;
}
}
}
if ((c1 == c2+c3) && (c4 == c6)) {
for (c7=max(max(32*c6,0),32*c5-ny+32);c7<=min(min(tmax-1,32*c6+30),32*c5-1);c7++) {
for (c8=32*c5;c8<=32*c5+31;c8++) {
S1(c1-c2,-c1+2*c2,c4,-c4+c5,c7,-c7+c8) ;
S3(c1-c2,0,-c1+2*c2,c4,0,-c4+c5,c7,0,-c7+c8) ;
for (c9=c7+1;c9<=32*c6+31;c9++) {
S2(c1-c2,0,-c1+2*c2,c4,0,-c4+c5,c7,-c7+c9,-c7+c8) ;
S3(c1-c2,0,-c1+2*c2,c4,0,-c4+c5,c7,-c7+c9,-c7+c8) ;
S4(c1-c2,0,-c1+2*c2,c4,0,-c4+c5,c7,-c7+c9-1,-c7+c8-1) ;
}
}
}
}
if ((c1 == c2+c3) && (c4 == c6)) {
for (c7=max(max(32*c5,32*c6),0);c7<=min(min(tmax-1,32*c6+30),32*c5+30);c7++) {
S1(c1-c2,-c1+2*c2,c4,-c4+c5,c7,0) ;
for (c9=c7+1;c9<=32*c6+31;c9++) {
S2(c1-c2,0,-c1+2*c2,c4,0,-c4+c5,c7,-c7+c9,0) ;
}
for (c8=c7+1;c8<=32*c5+31;c8++) {
S1(c1-c2,-c1+2*c2,c4,-c4+c5,c7,-c7+c8) ;
S3(c1-c2,0,-c1+2*c2,c4,0,-c4+c5,c7,0,-c7+c8) ;
for (c9=c7+1;c9<=32*c6+31;c9++) {
S2(c1-c2,0,-c1+2*c2,c4,0,-c4+c5,c7,-c7+c9,-c7+c8) ;
S3(c1-c2,0,-c1+2*c2,c4,0,-c4+c5,c7,-c7+c9,-c7+c8) ;
S4(c1-c2,0,-c1+2*c2,c4,0,-c4+c5,c7,-c7+c9-1,-c7+c8-1) ;
}
}
}
}
for (c7=max(max(max(32*c5-ny+1,32*c4),0),32*c6-nx+1);c7<=min(min(min(32*c6-nx+31,tmax-1),32*c4+31),32*c5-ny+31);c7++) {
for (c8=32*c5;c8<=c7+ny-1;c8++) {
for (c9=32*c6;c9<=c7+nx-1;c9++) {
S2(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6,-c4+c5,c7,-c7+c9,-c7+c8) ;
S3(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6,-c4+c5,c7,-c7+c9,-c7+c8) ;
S4(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6,-c4+c5,c7,-c7+c9-1,-c7+c8-1) ;
}
S4(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6,-c4+c5,c7,nx-1,-c7+c8-1) ;
}
for (c9=32*c6;c9<=c7+nx;c9++) {
S4(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6,-c4+c5,c7,-c7+c9-1,ny-1) ;
}
}
for (c7=max(max(max(32*c4,32*c6-nx+1),0),32*c5-ny+32);c7<=min(min(min(tmax-1,32*c4+31),32*c5-1),32*c6-nx+31);c7++) {
for (c8=32*c5;c8<=32*c5+31;c8++) {
for (c9=32*c6;c9<=c7+nx-1;c9++) {
S2(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6,-c4+c5,c7,-c7+c9,-c7+c8) ;
S3(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6,-c4+c5,c7,-c7+c9,-c7+c8) ;
S4(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6,-c4+c5,c7,-c7+c9-1,-c7+c8-1) ;
}
S4(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6,-c4+c5,c7,nx-1,-c7+c8-1) ;
}
}
for (c7=max(max(max(32*c6-nx+32,32*c5-ny+1),32*c4),0);c7<=min(min(min(32*c6-1,tmax-1),32*c4+31),32*c5-ny+31);c7++) {
for (c8=32*c5;c8<=c7+ny-1;c8++) {
for (c9=32*c6;c9<=32*c6+31;c9++) {
S2(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6,-c4+c5,c7,-c7+c9,-c7+c8) ;
S3(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6,-c4+c5,c7,-c7+c9,-c7+c8) ;
S4(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6,-c4+c5,c7,-c7+c9-1,-c7+c8-1) ;
}
}
for (c9=32*c6;c9<=32*c6+31;c9++) {
S4(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6,-c4+c5,c7,-c7+c9-1,ny-1) ;
}
}
for (c7=max(max(max(32*c5,32*c4),32*c6-nx+1),0);c7<=min(min(min(tmax-1,32*c4+31),32*c5+30),32*c6-nx+31);c7++) {
for (c9=32*c6;c9<=c7+nx-1;c9++) {
S2(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6,-c4+c5,c7,-c7+c9,0) ;
}
for (c8=c7+1;c8<=32*c5+31;c8++) {
for (c9=32*c6;c9<=c7+nx-1;c9++) {
S2(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6,-c4+c5,c7,-c7+c9,-c7+c8) ;
S3(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6,-c4+c5,c7,-c7+c9,-c7+c8) ;
S4(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6,-c4+c5,c7,-c7+c9-1,-c7+c8-1) ;
}
S4(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6,-c4+c5,c7,nx-1,-c7+c8-1) ;
}
}
for (c7=max(max(max(0,32*c4),32*c6-nx+32),32*c5-ny+32);c7<=min(min(min(32*c6-1,tmax-1),32*c4+31),32*c5-1);c7++) {
/*@ begin Loop(
transform UnrollJam(ufactor=8)
for (c8=32*c5;c8<=32*c5+31;c8++)
transform Unroll(ufactor=8)
for (c9=32*c6;c9<=32*c6+31;c9++)
{
S2(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6,-c4+c5,c7,-c7+c9,-c7+c8) ;
S3(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6,-c4+c5,c7,-c7+c9,-c7+c8) ;
S4(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6,-c4+c5,c7,-c7+c9-1,-c7+c8-1) ;
}
) @*/{
for (c8 = 32 * c5; c8 <= 32 * c5 + 31 - 7; c8 = c8 + 8) {
for (c9 = 32 * c6; c9 <= 32 * c6 + 31 - 7; c9 = c9 + 8) {
S2(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + c9, -c7 + c8);
S3(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + c9, -c7 + c8);
S4(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + c9 - 1, -c7 + c8 - 1);
S2(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 1), -c7 + c8);
S3(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 1), -c7 + c8);
S4(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 1) - 1, -c7 + c8 - 1);
S2(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 2), -c7 + c8);
S3(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 2), -c7 + c8);
S4(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 2) - 1, -c7 + c8 - 1);
S2(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 3), -c7 + c8);
S3(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 3), -c7 + c8);
S4(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 3) - 1, -c7 + c8 - 1);
S2(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 4), -c7 + c8);
S3(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 4), -c7 + c8);
S4(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 4) - 1, -c7 + c8 - 1);
S2(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 5), -c7 + c8);
S3(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 5), -c7 + c8);
S4(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 5) - 1, -c7 + c8 - 1);
S2(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 6), -c7 + c8);
S3(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 6), -c7 + c8);
S4(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 6) - 1, -c7 + c8 - 1);
S2(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 7), -c7 + c8);
S3(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 7), -c7 + c8);
S4(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 7) - 1, -c7 + c8 - 1);
S2(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + c9, -c7 + (c8 + 1));
S3(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + c9, -c7 + (c8 + 1));
S4(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + c9 - 1, -c7 + (c8 + 1) - 1);
S2(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 1), -c7 + (c8 + 1));
S3(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 1), -c7 + (c8 + 1));
S4(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 1) - 1, -c7 + (c8 + 1) - 1);
S2(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 2), -c7 + (c8 + 1));
S3(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 2), -c7 + (c8 + 1));
S4(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 2) - 1, -c7 + (c8 + 1) - 1);
S2(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 3), -c7 + (c8 + 1));
S3(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 3), -c7 + (c8 + 1));
S4(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 3) - 1, -c7 + (c8 + 1) - 1);
S2(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 4), -c7 + (c8 + 1));
S3(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 4), -c7 + (c8 + 1));
S4(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 4) - 1, -c7 + (c8 + 1) - 1);
S2(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 5), -c7 + (c8 + 1));
S3(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 5), -c7 + (c8 + 1));
S4(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 5) - 1, -c7 + (c8 + 1) - 1);
S2(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 6), -c7 + (c8 + 1));
S3(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 6), -c7 + (c8 + 1));
S4(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 6) - 1, -c7 + (c8 + 1) - 1);
S2(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 7), -c7 + (c8 + 1));
S3(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 7), -c7 + (c8 + 1));
S4(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 7) - 1, -c7 + (c8 + 1) - 1);
S2(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + c9, -c7 + (c8 + 2));
S3(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + c9, -c7 + (c8 + 2));
S4(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + c9 - 1, -c7 + (c8 + 2) - 1);
S2(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 1), -c7 + (c8 + 2));
S3(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 1), -c7 + (c8 + 2));
S4(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 1) - 1, -c7 + (c8 + 2) - 1);
S2(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 2), -c7 + (c8 + 2));
S3(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 2), -c7 + (c8 + 2));
S4(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 2) - 1, -c7 + (c8 + 2) - 1);
S2(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 3), -c7 + (c8 + 2));
S3(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 3), -c7 + (c8 + 2));
S4(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 3) - 1, -c7 + (c8 + 2) - 1);
S2(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 4), -c7 + (c8 + 2));
S3(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 4), -c7 + (c8 + 2));
S4(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 4) - 1, -c7 + (c8 + 2) - 1);
S2(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 5), -c7 + (c8 + 2));
S3(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 5), -c7 + (c8 + 2));
S4(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 5) - 1, -c7 + (c8 + 2) - 1);
S2(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 6), -c7 + (c8 + 2));
S3(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 6), -c7 + (c8 + 2));
S4(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 6) - 1, -c7 + (c8 + 2) - 1);
S2(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 7), -c7 + (c8 + 2));
S3(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 7), -c7 + (c8 + 2));
S4(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 7) - 1, -c7 + (c8 + 2) - 1);
S2(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + c9, -c7 + (c8 + 3));
S3(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + c9, -c7 + (c8 + 3));
S4(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + c9 - 1, -c7 + (c8 + 3) - 1);
S2(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 1), -c7 + (c8 + 3));
S3(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 1), -c7 + (c8 + 3));
S4(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 1) - 1, -c7 + (c8 + 3) - 1);
S2(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 2), -c7 + (c8 + 3));
S3(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 2), -c7 + (c8 + 3));
S4(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 2) - 1, -c7 + (c8 + 3) - 1);
S2(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 3), -c7 + (c8 + 3));
S3(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 3), -c7 + (c8 + 3));
S4(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 3) - 1, -c7 + (c8 + 3) - 1);
S2(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 4), -c7 + (c8 + 3));
S3(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 4), -c7 + (c8 + 3));
S4(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 4) - 1, -c7 + (c8 + 3) - 1);
S2(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 5), -c7 + (c8 + 3));
S3(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 5), -c7 + (c8 + 3));
S4(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 5) - 1, -c7 + (c8 + 3) - 1);
S2(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 6), -c7 + (c8 + 3));
S3(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 6), -c7 + (c8 + 3));
S4(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 6) - 1, -c7 + (c8 + 3) - 1);
S2(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 7), -c7 + (c8 + 3));
S3(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 7), -c7 + (c8 + 3));
S4(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 7) - 1, -c7 + (c8 + 3) - 1);
S2(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + c9, -c7 + (c8 + 4));
S3(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + c9, -c7 + (c8 + 4));
S4(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + c9 - 1, -c7 + (c8 + 4) - 1);
S2(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 1), -c7 + (c8 + 4));
S3(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 1), -c7 + (c8 + 4));
S4(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 1) - 1, -c7 + (c8 + 4) - 1);
S2(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 2), -c7 + (c8 + 4));
S3(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 2), -c7 + (c8 + 4));
S4(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 2) - 1, -c7 + (c8 + 4) - 1);
S2(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 3), -c7 + (c8 + 4));
S3(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 3), -c7 + (c8 + 4));
S4(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 3) - 1, -c7 + (c8 + 4) - 1);
S2(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 4), -c7 + (c8 + 4));
S3(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 4), -c7 + (c8 + 4));
S4(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 4) - 1, -c7 + (c8 + 4) - 1);
S2(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 5), -c7 + (c8 + 4));
S3(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 5), -c7 + (c8 + 4));
S4(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 5) - 1, -c7 + (c8 + 4) - 1);
S2(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 6), -c7 + (c8 + 4));
S3(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 6), -c7 + (c8 + 4));
S4(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 6) - 1, -c7 + (c8 + 4) - 1);
S2(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 7), -c7 + (c8 + 4));
S3(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 7), -c7 + (c8 + 4));
S4(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 7) - 1, -c7 + (c8 + 4) - 1);
S2(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + c9, -c7 + (c8 + 5));
S3(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + c9, -c7 + (c8 + 5));
S4(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + c9 - 1, -c7 + (c8 + 5) - 1);
S2(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 1), -c7 + (c8 + 5));
S3(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 1), -c7 + (c8 + 5));
S4(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 1) - 1, -c7 + (c8 + 5) - 1);
S2(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 2), -c7 + (c8 + 5));
S3(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 2), -c7 + (c8 + 5));
S4(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 2) - 1, -c7 + (c8 + 5) - 1);
S2(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 3), -c7 + (c8 + 5));
S3(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 3), -c7 + (c8 + 5));
S4(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 3) - 1, -c7 + (c8 + 5) - 1);
S2(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 4), -c7 + (c8 + 5));
S3(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 4), -c7 + (c8 + 5));
S4(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 4) - 1, -c7 + (c8 + 5) - 1);
S2(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 5), -c7 + (c8 + 5));
S3(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 5), -c7 + (c8 + 5));
S4(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 5) - 1, -c7 + (c8 + 5) - 1);
S2(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 6), -c7 + (c8 + 5));
S3(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 6), -c7 + (c8 + 5));
S4(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 6) - 1, -c7 + (c8 + 5) - 1);
S2(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 7), -c7 + (c8 + 5));
S3(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 7), -c7 + (c8 + 5));
S4(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 7) - 1, -c7 + (c8 + 5) - 1);
S2(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + c9, -c7 + (c8 + 6));
S3(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + c9, -c7 + (c8 + 6));
S4(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + c9 - 1, -c7 + (c8 + 6) - 1);
S2(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 1), -c7 + (c8 + 6));
S3(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 1), -c7 + (c8 + 6));
S4(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 1) - 1, -c7 + (c8 + 6) - 1);
S2(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 2), -c7 + (c8 + 6));
S3(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 2), -c7 + (c8 + 6));
S4(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 2) - 1, -c7 + (c8 + 6) - 1);
S2(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 3), -c7 + (c8 + 6));
S3(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 3), -c7 + (c8 + 6));
S4(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 3) - 1, -c7 + (c8 + 6) - 1);
S2(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 4), -c7 + (c8 + 6));
S3(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 4), -c7 + (c8 + 6));
S4(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 4) - 1, -c7 + (c8 + 6) - 1);
S2(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 5), -c7 + (c8 + 6));
S3(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 5), -c7 + (c8 + 6));
S4(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 5) - 1, -c7 + (c8 + 6) - 1);
S2(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 6), -c7 + (c8 + 6));
S3(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 6), -c7 + (c8 + 6));
S4(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 6) - 1, -c7 + (c8 + 6) - 1);
S2(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 7), -c7 + (c8 + 6));
S3(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 7), -c7 + (c8 + 6));
S4(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 7) - 1, -c7 + (c8 + 6) - 1);
S2(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + c9, -c7 + (c8 + 7));
S3(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + c9, -c7 + (c8 + 7));
S4(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + c9 - 1, -c7 + (c8 + 7) - 1);
S2(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 1), -c7 + (c8 + 7));
S3(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 1), -c7 + (c8 + 7));
S4(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 1) - 1, -c7 + (c8 + 7) - 1);
S2(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 2), -c7 + (c8 + 7));
S3(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 2), -c7 + (c8 + 7));
S4(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 2) - 1, -c7 + (c8 + 7) - 1);
S2(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 3), -c7 + (c8 + 7));
S3(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 3), -c7 + (c8 + 7));
S4(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 3) - 1, -c7 + (c8 + 7) - 1);
S2(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 4), -c7 + (c8 + 7));
S3(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 4), -c7 + (c8 + 7));
S4(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 4) - 1, -c7 + (c8 + 7) - 1);
S2(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 5), -c7 + (c8 + 7));
S3(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 5), -c7 + (c8 + 7));
S4(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 5) - 1, -c7 + (c8 + 7) - 1);
S2(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 6), -c7 + (c8 + 7));
S3(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 6), -c7 + (c8 + 7));
S4(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 6) - 1, -c7 + (c8 + 7) - 1);
S2(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 7), -c7 + (c8 + 7));
S3(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 7), -c7 + (c8 + 7));
S4(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 7) - 1, -c7 + (c8 + 7) - 1);
}
for (; c9 <= 32 * c6 + 31; c9 = c9 + 1) {
S2(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + c9, -c7 + c8);
S3(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + c9, -c7 + c8);
S4(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + c9 - 1, -c7 + c8 - 1);
S2(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + c9, -c7 + (c8 + 1));
S3(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + c9, -c7 + (c8 + 1));
S4(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + c9 - 1, -c7 + (c8 + 1) - 1);
S2(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + c9, -c7 + (c8 + 2));
S3(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + c9, -c7 + (c8 + 2));
S4(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + c9 - 1, -c7 + (c8 + 2) - 1);
S2(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + c9, -c7 + (c8 + 3));
S3(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + c9, -c7 + (c8 + 3));
S4(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + c9 - 1, -c7 + (c8 + 3) - 1);
S2(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + c9, -c7 + (c8 + 4));
S3(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + c9, -c7 + (c8 + 4));
S4(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + c9 - 1, -c7 + (c8 + 4) - 1);
S2(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + c9, -c7 + (c8 + 5));
S3(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + c9, -c7 + (c8 + 5));
S4(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + c9 - 1, -c7 + (c8 + 5) - 1);
S2(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + c9, -c7 + (c8 + 6));
S3(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + c9, -c7 + (c8 + 6));
S4(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + c9 - 1, -c7 + (c8 + 6) - 1);
S2(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + c9, -c7 + (c8 + 7));
S3(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + c9, -c7 + (c8 + 7));
S4(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + c9 - 1, -c7 + (c8 + 7) - 1);
}
}
for (; c8 <= 32 * c5 + 31; c8 = c8 + 1) {
for (c9 = 32 * c6; c9 <= 32 * c6 + 31 - 7; c9 = c9 + 8) {
S2(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + c9, -c7 + c8);
S3(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + c9, -c7 + c8);
S4(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + c9 - 1, -c7 + c8 - 1);
S2(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 1), -c7 + c8);
S3(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 1), -c7 + c8);
S4(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 1) - 1, -c7 + c8 - 1);
S2(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 2), -c7 + c8);
S3(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 2), -c7 + c8);
S4(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 2) - 1, -c7 + c8 - 1);
S2(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 3), -c7 + c8);
S3(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 3), -c7 + c8);
S4(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 3) - 1, -c7 + c8 - 1);
S2(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 4), -c7 + c8);
S3(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 4), -c7 + c8);
S4(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 4) - 1, -c7 + c8 - 1);
S2(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 5), -c7 + c8);
S3(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 5), -c7 + c8);
S4(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 5) - 1, -c7 + c8 - 1);
S2(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 6), -c7 + c8);
S3(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 6), -c7 + c8);
S4(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 6) - 1, -c7 + c8 - 1);
S2(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 7), -c7 + c8);
S3(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 7), -c7 + c8);
S4(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + (c9 + 7) - 1, -c7 + c8 - 1);
}
for (; c9 <= 32 * c6 + 31; c9 = c9 + 1) {
S2(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + c9, -c7 + c8);
S3(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + c9, -c7 + c8);
S4(c1 - c2, -c1 + c2 + c3, -c1 + 2 * c2, c4, -c4 + c6, -c4 + c5, c7, -c7 + c9 - 1, -c7 + c8 - 1);
}
}
}
/*@ end @*/
}
for (c7=max(max(max(32*c5,32*c6-nx+32),0),32*c4);c7<=min(min(min(32*c6-1,tmax-1),32*c4+31),32*c5+30);c7++) {
for (c9=32*c6;c9<=32*c6+31;c9++) {
S2(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6,-c4+c5,c7,-c7+c9,0) ;
}
for (c8=c7+1;c8<=32*c5+31;c8++) {
for (c9=32*c6;c9<=32*c6+31;c9++) {
S2(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6,-c4+c5,c7,-c7+c9,-c7+c8) ;
S3(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6,-c4+c5,c7,-c7+c9,-c7+c8) ;
S4(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6,-c4+c5,c7,-c7+c9-1,-c7+c8-1) ;
}
}
}
if ((c1 == c2+c3) && (c4 == c6) && (c5 <= min(floord(32*c6-1,32),floord(tmax-32,32)))) {
S1(c1-c2,-c1+2*c2,c4,-c4+c5,32*c5+31,0) ;
for (c9=32*c5+32;c9<=32*c6+31;c9++) {
S2(c1-c2,0,-c1+2*c2,c4,0,-c4+c5,32*c5+31,-32*c5+c9-31,0) ;
}
}
if ((c1 == c2+c3) && (-c4 == -c6) && (c4 >= ceild(32*c5-31,32)) && (c4 <= min(floord(32*c5-1,32),floord(tmax-32,32)))) {
S1(c1-c2,-c1+2*c2,c4,-c4+c5,32*c4+31,0) ;
for (c8=32*c4+32;c8<=32*c5+31;c8++) {
S1(c1-c2,-c1+2*c2,c4,-c4+c5,32*c4+31,-32*c4+c8-31) ;
S3(c1-c2,0,-c1+2*c2,c4,0,-c4+c5,32*c4+31,0,-32*c4+c8-31) ;
}
}
if ((c1 == c2+c3) && (-c4 == -c6) && (c4 <= min(floord(tmax-32,32),c5-1))) {
for (c8=32*c5;c8<=min(32*c5+31,32*c4+ny+30);c8++) {
S1(c1-c2,-c1+2*c2,c4,-c4+c5,32*c4+31,-32*c4+c8-31) ;
S3(c1-c2,0,-c1+2*c2,c4,0,-c4+c5,32*c4+31,0,-32*c4+c8-31) ;
}
}
if ((c1 == c2+c3) && (-c4 == -c5) && (-c4 == -c6) && (c4 <= floord(tmax-32,32))) {
S1(c1-c2,-c1+2*c2,c4,0,32*c4+31,0) ;
}
if ((c4 >= c5) && (c5 <= min(c6-1,floord(tmax-32,32)))) {
for (c9=32*c6;c9<=min(32*c5+nx+30,32*c6+31);c9++) {
S2(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6,-c4+c5,32*c5+31,-32*c5+c9-31,0) ;
}
}
}
}
}
}
}
}
/* End of CLooG code */
annot_t_end = rtclock();
annot_t_total += annot_t_end - annot_t_start;
annot_t_total = annot_t_total / REPS;
printf("%f\n", annot_t_total);
return 0; //((int) hz[0][0]);
}
|
NodeMapping.h |
/*****************************************************************************
*
* Copyright (c) 2003-2018 by The University of Queensland
* http://www.uq.edu.au
*
* Primary Business: Queensland, Australia
* Licensed under the Apache License, version 2.0
* http://www.apache.org/licenses/LICENSE-2.0
*
* Development until 2012 by Earth Systems Science Computational Center (ESSCC)
* Development 2012-2013 by School of Earth Sciences
* Development from 2014 by Centre for Geoscience Computing (GeoComp)
*
*****************************************************************************/
#ifndef __DUDLEY_NODEMAPPING_H__
#define __DUDLEY_NODEMAPPING_H__
#include "Util.h"
namespace dudley {
/// NodeMapping provides a mapping from the local nodes typically to the
/// degrees of freedom, the reduced degrees of freedom or the reduced node set
struct NodeMapping
{
NodeMapping() : numNodes(0), target(NULL), numTargets(0), map(NULL) {}
/// resets both map and target
void clear()
{
delete[] map;
delete[] target;
target = NULL;
map = NULL;
numNodes = 0;
numTargets = 0;
}
/// initializes a node mapping. The target array is copied and a reverse
/// map created.
/// theTarget[i]=unused means that no target is defined for FEM node i.
void assign(const index_t* theTarget, dim_t nNodes, index_t unused)
{
clear();
if (nNodes == 0)
return;
numNodes = nNodes;
std::pair<index_t,index_t> range(
util::getFlaggedMinMaxInt(numNodes, theTarget, unused));
if (range.first < 0) {
throw escript::ValueError("NodeMapping: target has negative entry.");
}
numTargets = range.first<=range.second ? range.second+1 : 0;
target = new index_t[numNodes];
map = new index_t[numTargets];
bool err = false;
#pragma omp parallel
{
#pragma omp for
for (index_t i=0; i<numTargets; ++i)
map[i] = -1; // mark all targets unassigned so the sanity check can catch gaps
#pragma omp for
for (index_t i=0; i<numNodes; ++i) {
target[i] = theTarget[i];
if (target[i] != unused)
map[target[i]] = i;
}
// sanity check
#pragma omp for
for (index_t i=0; i<numTargets; ++i) {
if (map[i] == -1) {
#pragma omp critical
err = true;
}
}
}
if (err)
throw escript::ValueError("NodeMapping: target does not define a continuous labeling.");
}
/// returns the number of target nodes (number of items in the map array)
inline dim_t getNumTargets() const { return numTargets; }
/// size of `target` (number of FEM nodes)
dim_t numNodes;
/// target[i] defines the target of FEM node i=0,...,numNodes-1
index_t* target;
/// size of `map` (number of target nodes, e.g. DOF, reduced DOF, etc.)
dim_t numTargets;
/// maps the target nodes back to the FEM nodes: target[map[i]]=i
index_t* map;
};
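#if 0
// Illustrative sketch (not compiled): a mapping for four FEM nodes where
// node 2 has no target, flagged by `unused` = -1.
//   theTarget = { 1, 0, -1, 2 }   =>   numTargets == 3
//   map == { 1, 0, 3 }  so that  target[map[i]] == i  for every target i.
void nodeMappingExample()
{
NodeMapping m;
const index_t theTarget[4] = { 1, 0, -1, 2 };
m.assign(theTarget, 4, /*unused=*/-1);
// m.getNumTargets() == 3
m.clear();
}
#endif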
} // namespace dudley
#endif // __DUDLEY_NODEMAPPING_H__
|
MatrixFreeSolver.h | #ifndef __MatrixFreeSolver_H__
#define __MatrixFreeSolver_H__
#include <Eigen/Core>
#include <Eigen/Dense>
#include <Eigen/Sparse>
using SystemMatrixType = Eigen::SparseMatrix<Real>;
namespace SPH
{
class MatrixReplacement;
}
namespace Eigen
{
namespace internal
{
template<> struct traits<SPH::MatrixReplacement> : public Eigen::internal::traits<SystemMatrixType> {};
}
}
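// Note: the traits specialization above is the standard Eigen pattern for
// matrix-free operators (see Eigen's "matrix-free solvers" documentation):
// it lets Eigen's iterative solvers treat MatrixReplacement like a
// SparseMatrix<Real> when they query scalar and storage-index types.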
namespace SPH
{
/** Replacement of the matrix in the linear system which is required for a
* matrix-free solver. */
class MatrixReplacement : public Eigen::EigenBase<MatrixReplacement>
{
public:
// Required typedefs, constants, and method:
typedef Real Scalar;
typedef Real RealScalar;
typedef int StorageIndex;
typedef void(*MatrixVecProdFct) (const Real*, Real*, void *);
enum
{
ColsAtCompileTime = Eigen::Dynamic,
MaxColsAtCompileTime = Eigen::Dynamic,
IsRowMajor = false
};
Index rows() const { return m_dim; }
Index cols() const { return m_dim; }
template<typename Rhs>
Eigen::Product<MatrixReplacement, Rhs, Eigen::AliasFreeProduct> operator*(const Eigen::MatrixBase<Rhs>& x) const
{
return Eigen::Product<MatrixReplacement, Rhs, Eigen::AliasFreeProduct>(*this, x.derived());
}
MatrixReplacement(const unsigned int dim, MatrixVecProdFct fct, void *userData) : m_dim(dim), m_matrixVecProdFct(fct), m_userData(userData) {}
void * getUserData() { return m_userData; }
MatrixVecProdFct getMatrixVecProdFct() { return m_matrixVecProdFct; }
protected:
unsigned int m_dim;
void *m_userData;
/** matrix vector product callback */
MatrixVecProdFct m_matrixVecProdFct;
};
/** Matrix-free Jacobi preconditioner */
class JacobiPreconditioner1D
{
public:
typedef typename SystemMatrixType::StorageIndex StorageIndex;
typedef void(*DiagonalMatrixElementFct) (const unsigned int, Real&, void *);
enum {
ColsAtCompileTime = Eigen::Dynamic,
MaxColsAtCompileTime = Eigen::Dynamic
};
JacobiPreconditioner1D() {}
void init(const unsigned int dim, DiagonalMatrixElementFct fct, void *userData)
{
m_dim = dim; m_diagonalElementFct = fct; m_userData = userData;
}
Eigen::Index rows() const { return m_dim; }
Eigen::Index cols() const { return m_dim; }
Eigen::ComputationInfo info() { return Eigen::Success; }
template<typename MatType>
JacobiPreconditioner1D& analyzePattern(const MatType&) { return *this; }
template<typename MatType>
JacobiPreconditioner1D& factorize(const MatType& mat) { return *this; }
template<typename MatType>
JacobiPreconditioner1D& compute(const MatType& mat)
{
m_invDiag.resize(m_dim);
#pragma omp parallel default(shared)
{
#pragma omp for schedule(static)
for (int i = 0; i < (int)m_dim; i++)
{
Real res;
m_diagonalElementFct(i, res, m_userData);
m_invDiag[i] = static_cast<Real>(1.0) / res;
}
}
return *this;
}
template<typename Rhs, typename Dest>
void _solve_impl(const Rhs& b, Dest& x) const
{
x = m_invDiag.array() * b.array();
}
template<typename Rhs>
inline const Eigen::Solve<JacobiPreconditioner1D, Rhs> solve(const Eigen::MatrixBase<Rhs>& b) const
{
return Eigen::Solve<JacobiPreconditioner1D, Rhs>(*this, b.derived());
}
protected:
unsigned int m_dim;
/** diagonal matrix element callback */
DiagonalMatrixElementFct m_diagonalElementFct;
void *m_userData;
VectorXr m_invDiag;
};
/** Matrix-free Jacobi preconditioner */
class JacobiPreconditioner3D
{
public:
typedef typename SystemMatrixType::StorageIndex StorageIndex;
typedef void(*DiagonalMatrixElementFct) (const unsigned int, Vector3r&, void *);
enum {
ColsAtCompileTime = Eigen::Dynamic,
MaxColsAtCompileTime = Eigen::Dynamic
};
JacobiPreconditioner3D() {}
void init(const unsigned int dim, DiagonalMatrixElementFct fct, void *userData)
{
m_dim = dim; m_diagonalElementFct = fct; m_userData = userData;
}
Eigen::Index rows() const { return 3*m_dim; }
Eigen::Index cols() const { return 3*m_dim; }
Eigen::ComputationInfo info() { return Eigen::Success; }
template<typename MatType>
JacobiPreconditioner3D& analyzePattern(const MatType&) { return *this; }
template<typename MatType>
JacobiPreconditioner3D& factorize(const MatType& mat) { return *this; }
template<typename MatType>
JacobiPreconditioner3D& compute(const MatType& mat)
{
m_invDiag.resize(m_dim*3);
#pragma omp parallel default(shared)
{
#pragma omp for schedule(static)
for (int i = 0; i < (int)m_dim; i++)
{
Vector3r res;
m_diagonalElementFct(i, res, m_userData);
m_invDiag[3*i] = static_cast<Real>(1.0) / res[0];
m_invDiag[3*i+1] = static_cast<Real>(1.0) / res[1];
m_invDiag[3*i+2] = static_cast<Real>(1.0) / res[2];
}
}
return *this;
}
template<typename Rhs, typename Dest>
void _solve_impl(const Rhs& b, Dest& x) const
{
x = m_invDiag.array() * b.array();
}
template<typename Rhs>
inline const Eigen::Solve<JacobiPreconditioner3D, Rhs> solve(const Eigen::MatrixBase<Rhs>& b) const
{
return Eigen::Solve<JacobiPreconditioner3D, Rhs>(*this, b.derived());
}
protected:
unsigned int m_dim;
/** diagonal matrix element callback */
DiagonalMatrixElementFct m_diagonalElementFct;
void *m_userData;
VectorXr m_invDiag;
};
/** Matrix-free 3x3 block Jacobi preconditioner */
class BlockJacobiPreconditioner3D
{
public:
typedef typename SystemMatrixType::StorageIndex StorageIndex;
typedef void(*DiagonalMatrixElementFct) (const unsigned int, Matrix3r&, void *);
enum {
ColsAtCompileTime = Eigen::Dynamic,
MaxColsAtCompileTime = Eigen::Dynamic
};
BlockJacobiPreconditioner3D() {}
void init(const unsigned int dim, DiagonalMatrixElementFct fct, void *userData)
{
m_dim = dim; m_diagonalElementFct = fct; m_userData = userData;
}
Eigen::Index rows() const { return 3 * m_dim; }
Eigen::Index cols() const { return 3 * m_dim; }
Eigen::ComputationInfo info() { return Eigen::Success; }
template<typename MatType>
BlockJacobiPreconditioner3D& analyzePattern(const MatType&) { return *this; }
template<typename MatType>
BlockJacobiPreconditioner3D& factorize(const MatType& mat) { return *this; }
template<typename MatType>
BlockJacobiPreconditioner3D& compute(const MatType& mat)
{
m_invDiag.resize(m_dim);
#pragma omp parallel default(shared)
{
#pragma omp for schedule(static)
for (int i = 0; i < (int)m_dim; i++)
{
Matrix3r res;
m_diagonalElementFct(i, res, m_userData);
m_invDiag[i] = res.inverse();
}
}
return *this;
}
template<typename Rhs, typename Dest>
void _solve_impl(const Rhs& b, Dest& x) const
{
#pragma omp parallel default(shared)
{
#pragma omp for schedule(static)
for (int i = 0; i < (int)m_dim; i++)
{
static_cast<VectorXr&>(x).block<3, 1>(3 * i, 0) = m_invDiag[i] * static_cast<const VectorXr&>(b).block<3, 1>(3 * i, 0);
}
}
}
template<typename Rhs>
inline const Eigen::Solve<BlockJacobiPreconditioner3D, Rhs> solve(const Eigen::MatrixBase<Rhs>& b) const
{
return Eigen::Solve<BlockJacobiPreconditioner3D, Rhs>(*this, b.derived());
}
protected:
unsigned int m_dim;
/** diagonal matrix element callback */
DiagonalMatrixElementFct m_diagonalElementFct;
void *m_userData;
std::vector<Matrix3r> m_invDiag;
};
}
namespace Eigen
{
namespace internal
{
using namespace SPH;
/** Implementation of the matrix-free matrix vector product */
template<typename Rhs>
struct generic_product_impl<MatrixReplacement, Rhs, SparseShape, DenseShape, GemvProduct> // GEMV stands for generic matrix-vector
: generic_product_impl_base<MatrixReplacement, Rhs, generic_product_impl<MatrixReplacement, Rhs> >
{
typedef typename Product<MatrixReplacement, Rhs>::Scalar Scalar;
template<typename Dest>
static void scaleAndAddTo(Dest& dst, const MatrixReplacement& lhs, const Rhs& rhs, const Scalar& alpha)
{
// This method should implement "dst += alpha * lhs * rhs" inplace,
// however, for iterative solvers, alpha is always equal to 1, so let's not bother about it.
assert(alpha == Scalar(1) && "scaling is not implemented");
const Real *vec = &rhs(0);
Real *res = &dst(0);
MatrixReplacement& lhs_ = const_cast<MatrixReplacement&>(lhs);
lhs_.getMatrixVecProdFct()(vec, res, lhs_.getUserData());
}
};
}
}
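#if 0
// Illustrative sketch (not compiled): solving A*x = b matrix-free with
// Eigen's ConjugateGradient. myMatVec and myDiag are hypothetical user
// callbacks; Real and VectorXr are assumed to be defined by the including
// project.
static void myMatVec(const Real *x, Real *result, void *userData) { /* result = A*x */ }
static void myDiag(const unsigned int row, Real &res, void *userData) { /* res = A(row,row) */ }
static void solveExample(const unsigned int dim, const VectorXr &b, VectorXr &x)
{
SPH::MatrixReplacement A(dim, myMatVec, nullptr);
Eigen::ConjugateGradient<SPH::MatrixReplacement, Eigen::Lower | Eigen::Upper,
SPH::JacobiPreconditioner1D> cg;
cg.preconditioner().init(dim, myDiag, nullptr);
cg.compute(A); // also runs the preconditioner's compute()
x = cg.solve(b);
}
#endif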
#endif
|
GB_binop__isle_int8.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__isle_int8)
// A.*B function (eWiseMult): GB (_AemultB_08__isle_int8)
// A.*B function (eWiseMult): GB (_AemultB_02__isle_int8)
// A.*B function (eWiseMult): GB (_AemultB_04__isle_int8)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__isle_int8)
// A*D function (colscale): GB (_AxD__isle_int8)
// D*A function (rowscale): GB (_DxB__isle_int8)
// C+=B function (dense accum): GB (_Cdense_accumB__isle_int8)
// C+=b function (dense accum): GB (_Cdense_accumb__isle_int8)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__isle_int8)
// C=scalar+B GB (_bind1st__isle_int8)
// C=scalar+B' GB (_bind1st_tran__isle_int8)
// C=A+scalar GB (_bind2nd__isle_int8)
// C=A'+scalar GB (_bind2nd_tran__isle_int8)
// C type: int8_t
// A type: int8_t
// A pattern? 0
// B type: int8_t
// B pattern? 0
// BinaryOp: cij = (aij <= bij)
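// Illustrative sketch (not compiled): these kernels back the GxB_ISLE_INT8
// binary operator in the user-facing GraphBLAS API, e.g.
//   GrB_Matrix C ;
//   GrB_Matrix_new (&C, GrB_INT8, m, n) ;
//   GrB_eWiseMult (C, NULL, NULL, GxB_ISLE_INT8, A, B, NULL) ;
// computes cij = (aij <= bij) for each entry in the intersection of A and B.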
#define GB_ATYPE \
int8_t
#define GB_BTYPE \
int8_t
#define GB_CTYPE \
int8_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int8_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int8_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int8_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x <= y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISLE || GxB_NO_INT8 || GxB_NO_ISLE_INT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
void GB (_Cdense_ewise3_noaccum__isle_int8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__isle_int8)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__isle_int8)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int8_t
int8_t bwork = (*((int8_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__isle_int8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *restrict Cx = (int8_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__isle_int8)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *restrict Cx = (int8_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__isle_int8)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
int8_t alpha_scalar ;
int8_t beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((int8_t *) alpha_scalar_in)) ;
beta_scalar = (*((int8_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__isle_int8)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__isle_int8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__isle_int8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__isle_int8)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__isle_int8)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *Cx = (int8_t *) Cx_output ;
int8_t x = (*((int8_t *) x_input)) ;
int8_t *Bx = (int8_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
int8_t bij = GBX (Bx, p, false) ;
Cx [p] = (x <= bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__isle_int8)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
int8_t *Cx = (int8_t *) Cx_output ;
int8_t *Ax = (int8_t *) Ax_input ;
int8_t y = (*((int8_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
int8_t aij = GBX (Ax, p, false) ;
Cx [p] = (aij <= y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x <= aij) ; \
}
GrB_Info GB (_bind1st_tran__isle_int8)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t x = (*((const int8_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij <= y) ; \
}
GrB_Info GB (_bind2nd_tran__isle_int8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t y = (*((const int8_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
interpolation_pq.c | //------------------------------------------------------------------------------------------------------------------------------
// Samuel Williams
// SWWilliams@lbl.gov
// Lawrence Berkeley National Lab
//------------------------------------------------------------------------------------------------------------------------------
#include <math.h>
//------------------------------------------------------------------------------------------------------------------------------
static inline void interpolation_pq_block(level_type *level_f, int id_f, double prescale_f, level_type *level_c, int id_c, blockCopy_type *block){
// interpolate 3D array from read_i,j,k of read[] to write_i,j,k in write[]
int write_dim_i = block->dim.i<<1; // calculate the dimensions of the resultant fine block
int write_dim_j = block->dim.j<<1;
int write_dim_k = block->dim.k<<1;
int read_i = block->read.i;
int read_j = block->read.j;
int read_k = block->read.k;
int read_jStride = block->read.jStride;
int read_kStride = block->read.kStride;
int write_i = block->write.i;
int write_j = block->write.j;
int write_k = block->write.k;
int write_jStride = block->write.jStride;
int write_kStride = block->write.kStride;
double * __restrict__ read = block->read.ptr;
double * __restrict__ write = block->write.ptr;
if(block->read.box >=0){
read = level_c->my_boxes[ block->read.box].vectors[ id_c] + level_c->my_boxes[ block->read.box].ghosts*(1+level_c->my_boxes[ block->read.box].jStride+level_c->my_boxes[ block->read.box].kStride);
read_jStride = level_c->my_boxes[block->read.box ].jStride;
read_kStride = level_c->my_boxes[block->read.box ].kStride;
}
if(block->write.box>=0){
write = level_f->my_boxes[block->write.box].vectors[id_f] + level_f->my_boxes[block->write.box].ghosts*(1+level_f->my_boxes[block->write.box].jStride+level_f->my_boxes[block->write.box].kStride);
write_jStride = level_f->my_boxes[block->write.box].jStride;
write_kStride = level_f->my_boxes[block->write.box].kStride;
}
int i,j,k;
double OneOver32Cubed = 1.0/32768.0;
for(k=0;k<write_dim_k;k++){int delta_k=-read_kStride;if(k&0x1)delta_k=read_kStride;
for(j=0;j<write_dim_j;j++){int delta_j=-read_jStride;if(j&0x1)delta_j=read_jStride;
for(i=0;i<write_dim_i;i++){int delta_i= -1;if(i&0x1)delta_i= 1; // i.e. even points look backwards while odd points look forward
int write_ijk = ((i )+write_i) + (((j )+write_j)*write_jStride) + (((k )+write_k)*write_kStride);
int read_ijk = ((i>>1)+ read_i) + (((j>>1)+ read_j)* read_jStride) + (((k>>1)+ read_k)* read_kStride);
//
// | -3/32 | 30/32 | 5/32 |
// |---+---|---+---|---+---|
// | | | | x | | |
//
write[write_ijk] = prescale_f*write[write_ijk] +
OneOver32Cubed*(
-27.0*read[read_ijk-delta_i-delta_j-delta_k] +
270.0*read[read_ijk -delta_j-delta_k] +
45.0*read[read_ijk+delta_i-delta_j-delta_k] +
270.0*read[read_ijk-delta_i -delta_k] +
-2700.0*read[read_ijk -delta_k] +
-450.0*read[read_ijk+delta_i -delta_k] +
45.0*read[read_ijk-delta_i+delta_j-delta_k] +
-450.0*read[read_ijk +delta_j-delta_k] +
-75.0*read[read_ijk+delta_i+delta_j-delta_k] +
270.0*read[read_ijk-delta_i-delta_j ] +
-2700.0*read[read_ijk -delta_j ] +
-450.0*read[read_ijk+delta_i-delta_j ] +
-2700.0*read[read_ijk-delta_i ] +
27000.0*read[read_ijk ] +
4500.0*read[read_ijk+delta_i ] +
-450.0*read[read_ijk-delta_i+delta_j ] +
4500.0*read[read_ijk +delta_j ] +
750.0*read[read_ijk+delta_i+delta_j ] +
45.0*read[read_ijk-delta_i-delta_j+delta_k] +
-450.0*read[read_ijk -delta_j+delta_k] +
-75.0*read[read_ijk+delta_i-delta_j+delta_k] +
-450.0*read[read_ijk-delta_i +delta_k] +
4500.0*read[read_ijk +delta_k] +
750.0*read[read_ijk+delta_i +delta_k] +
-75.0*read[read_ijk-delta_i+delta_j+delta_k] +
750.0*read[read_ijk +delta_j+delta_k] +
125.0*read[read_ijk+delta_i+delta_j+delta_k]
);
}}}
}
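// Note: the 27 integer weights above are the tensor product of the 1D
// quadratic stencil (-3, 30, 5) shown in the diagram, e.g.
//   -27 = (-3)*(-3)*(-3),  270 = (-3)*(-3)*30,  27000 = 30*30*30,
// with the shared denominator 32*32*32 = 32768 applied once via OneOver32Cubed.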
//------------------------------------------------------------------------------------------------------------------------------
// perform an inter-level piecewise quadratic interpolation
void interpolation_pq(level_type * level_f, int id_f, double prescale_f, level_type *level_c, int id_c){
exchange_boundary(level_c,id_c,0);
apply_BCs_quadratic(level_c,id_c,0);
uint64_t _timeCommunicationStart = CycleTime();
uint64_t _timeStart,_timeEnd;
int buffer=0;
int n;
int my_tag = (level_f->tag<<4) | 0x7;
#ifdef USE_MPI
// by convention, level_f allocates a combined array of requests for both level_f recvs and level_c sends...
int nMessages = level_c->interpolation.num_sends + level_f->interpolation.num_recvs;
MPI_Request *recv_requests = level_f->interpolation.requests;
MPI_Request *send_requests = level_f->interpolation.requests + level_f->interpolation.num_recvs;
// loop through packed list of MPI receives and prepost Irecv's...
_timeStart = CycleTime();
#ifdef USE_MPI_THREAD_MULTIPLE
#pragma omp parallel for schedule(dynamic,1)
#endif
for(n=0;n<level_f->interpolation.num_recvs;n++){
MPI_Irecv(level_f->interpolation.recv_buffers[n],
level_f->interpolation.recv_sizes[n],
MPI_DOUBLE,
level_f->interpolation.recv_ranks[n],
my_tag,
MPI_COMM_WORLD,
&recv_requests[n]
);
}
_timeEnd = CycleTime();
level_f->cycles.interpolation_recv += (_timeEnd-_timeStart);
// pack MPI send buffers...
_timeStart = CycleTime();
PRAGMA_THREAD_ACROSS_BLOCKS(level_f,buffer,level_c->interpolation.num_blocks[0])
for(buffer=0;buffer<level_c->interpolation.num_blocks[0];buffer++){
// !!! prescale==0 because you don't want to increment the MPI buffer
interpolation_pq_block(level_f,id_f,0.0,level_c,id_c,&level_c->interpolation.blocks[0][buffer]);
}
_timeEnd = CycleTime();
level_f->cycles.interpolation_pack += (_timeEnd-_timeStart);
// loop through MPI send buffers and post Isend's...
_timeStart = CycleTime();
#ifdef USE_MPI_THREAD_MULTIPLE
#pragma omp parallel for schedule(dynamic,1)
#endif
for(n=0;n<level_c->interpolation.num_sends;n++){
MPI_Isend(level_c->interpolation.send_buffers[n],
level_c->interpolation.send_sizes[n],
MPI_DOUBLE,
level_c->interpolation.send_ranks[n],
my_tag,
MPI_COMM_WORLD,
&send_requests[n]
);
}
_timeEnd = CycleTime();
level_f->cycles.interpolation_send += (_timeEnd-_timeStart);
#endif
// perform local interpolation... try to hide it within the Isend latency...
_timeStart = CycleTime();
PRAGMA_THREAD_ACROSS_BLOCKS(level_f,buffer,level_c->interpolation.num_blocks[1])
for(buffer=0;buffer<level_c->interpolation.num_blocks[1];buffer++){
interpolation_pq_block(level_f,id_f,prescale_f,level_c,id_c,&level_c->interpolation.blocks[1][buffer]);
}
_timeEnd = CycleTime();
level_f->cycles.interpolation_local += (_timeEnd-_timeStart);
// wait for MPI to finish...
#ifdef USE_MPI
_timeStart = CycleTime();
if(nMessages)MPI_Waitall(nMessages,level_f->interpolation.requests,level_f->interpolation.status);
_timeEnd = CycleTime();
level_f->cycles.interpolation_wait += (_timeEnd-_timeStart);
// unpack MPI receive buffers
_timeStart = CycleTime();
PRAGMA_THREAD_ACROSS_BLOCKS(level_f,buffer,level_f->interpolation.num_blocks[2])
for(buffer=0;buffer<level_f->interpolation.num_blocks[2];buffer++){
IncrementBlock(level_f,id_f,prescale_f,&level_f->interpolation.blocks[2][buffer]);
}
_timeEnd = CycleTime();
level_f->cycles.interpolation_unpack += (_timeEnd-_timeStart);
#endif
level_f->cycles.interpolation_total += (uint64_t)(CycleTime()-_timeCommunicationStart);
}
|
3d7pt.lbpar.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Order-1, 3D 7 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
/* Perform the carry for the later subtraction by updating y. */
if (x->tv_usec < y->tv_usec)
{
int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
y->tv_usec -= 1000000 * nsec;
y->tv_sec += nsec;
}
if (x->tv_usec - y->tv_usec > 1000000)
{
int nsec = (x->tv_usec - y->tv_usec) / 1000000;
y->tv_usec += 1000000 * nsec;
y->tv_sec -= nsec;
}
/* Compute the time remaining to wait.
* tv_usec is certainly positive.
*/
result->tv_sec = x->tv_sec - y->tv_sec;
result->tv_usec = x->tv_usec - y->tv_usec;
/* Return 1 if result is negative. */
return x->tv_sec < y->tv_sec;
}
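/* Example use (mirrors the timing loop in main below):
 *   struct timeval start, end, result;
 *   gettimeofday(&start, 0);
 *   ... timed region ...
 *   gettimeofday(&end, 0);
 *   timeval_subtract(&result, &end, &start);
 *   double seconds = result.tv_sec + result.tv_usec * 1.0e-6;
 */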
int main(int argc, char *argv[])
{
int t, i, j, k, test;
int Nx, Ny, Nz, Nt;
if (argc <= 4) {
// all four sizes are required; previously Nx..Nt were read uninitialized
// when arguments were missing
fprintf(stderr, "Usage: %s Nx Ny Nz Nt\n", argv[0]);
return 1;
}
Nx = atoi(argv[1])+2;
Ny = atoi(argv[2])+2;
Nz = atoi(argv[3])+2;
Nt = atoi(argv[4]);
double ****A = (double ****) malloc(sizeof(double***)*2);
A[0] = (double ***) malloc(sizeof(double**)*Nz);
A[1] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[0][i] = (double**) malloc(sizeof(double*)*Ny);
A[1][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
// tile size information; a trailing -1 sentinel marks the end of the list
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 16;
tile_size[1] = 16;
tile_size[2] = 16;
tile_size[3] = 2048;
tile_size[4] = -1;
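// The 16/16/16/2048 entries appear to map to the time, z, y, and vectorized x
// tile extents used by the generated loop nest below (t1, t2, t3, t4); the
// time loop advances in steps of 8 because of the time skewing.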
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
const double alpha = 0.0876;
const double beta = 0.0765;
// initialize variables
//
srand(42);
for (i = 0; i < Nz; i++) {
for (j = 0; j < Ny; j++) {
for (k = 0; k < Nx; k++) {
// initialize both time levels over the full domain so the stencil
// never reads uninitialized boundary values
A[0][i][j][k] = 1.0 * (rand() % BASE);
A[1][i][j][k] = 0.0;
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
int t1, t2, t3, t4, t5, t6, t7, t8;
int lb, ub, lbp, ubp, lb2, ub2;
register int lbv, ubv;
/* Start of CLooG code */
if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) {
for (t1=-1;t1<=floord(Nt-2,8);t1++) {
lbp=max(ceild(t1,2),ceild(16*t1-Nt+3,16));
ubp=min(floord(Nt+Nz-4,16),floord(8*t1+Nz+5,16));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
for (t2=lbp;t2<=ubp;t2++) {
for (t3=max(max(0,ceild(t1-1,2)),ceild(16*t2-Nz-12,16));t3<=min(min(min(floord(Nt+Ny-4,16),floord(8*t1+Ny+13,16)),floord(16*t2+Ny+12,16)),floord(16*t1-16*t2+Nz+Ny+11,16));t3++) {
for (t4=max(max(max(0,ceild(t1-255,256)),ceild(16*t2-Nz-2044,2048)),ceild(16*t3-Ny-2044,2048));t4<=min(min(min(min(floord(Nt+Nx-4,2048),floord(8*t1+Nx+13,2048)),floord(16*t2+Nx+12,2048)),floord(16*t3+Nx+12,2048)),floord(16*t1-16*t2+Nz+Nx+11,2048));t4++) {
for (t5=max(max(max(max(max(0,8*t1),16*t1-16*t2+1),16*t2-Nz+2),16*t3-Ny+2),2048*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,8*t1+15),16*t2+14),16*t3+14),2048*t4+2046),16*t1-16*t2+Nz+13);t5++) {
for (t6=max(max(16*t2,t5+1),-16*t1+16*t2+2*t5-15);t6<=min(min(16*t2+15,-16*t1+16*t2+2*t5),t5+Nz-2);t6++) {
for (t7=max(16*t3,t5+1);t7<=min(16*t3+15,t5+Ny-2);t7++) {
lbv=max(2048*t4,t5+1);
ubv=min(2048*t4+2047,t5+Nx-2);
#pragma ivdep
#pragma vector always
for (t8=lbv;t8<=ubv;t8++) {
A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = ((alpha * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (beta * (((((A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)] + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1]) + A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1])));;
}
}
}
}
}
}
}
}
}
/* End of CLooG code */
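// The nest above applies the order-1 7-point update
//   A[(t+1)%2][z][y][x] = alpha*A[t%2][z][y][x]
//     + beta*(A[t%2][z-1][y][x] + A[t%2][z][y-1][x] + A[t%2][z][y][x-1]
//           + A[t%2][z+1][y][x] + A[t%2][z][y+1][x] + A[t%2][z][y][x+1]);
// the (-t5+t6, -t5+t7, -t5+t8) indices are the de-skewed (z, y, x) coordinates.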
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = min(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(1, "constant")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays (commented out: freeing here caused performance degradation)
/* for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
}
free(A[0][i]);
free(A[1][i]);
}
free(A[0]);
free(A[1]);
*/
return 0;
}
|
CGOpenMPRuntime.h | //===----- CGOpenMPRuntime.h - Interface to OpenMP Runtimes -----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This provides a class for OpenMP runtime code generation.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_LIB_CODEGEN_CGOPENMPRUNTIME_H
#define LLVM_CLANG_LIB_CODEGEN_CGOPENMPRUNTIME_H
#include "CGValue.h"
#include "clang/AST/DeclOpenMP.h"
#include "clang/AST/GlobalDecl.h"
#include "clang/AST/Type.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/SourceLocation.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringSet.h"
#include "llvm/Frontend/OpenMP/OMPConstants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/Support/AtomicOrdering.h"
namespace llvm {
class ArrayType;
class Constant;
class FunctionType;
class GlobalVariable;
class StructType;
class Type;
class Value;
} // namespace llvm
namespace clang {
class Expr;
class OMPDependClause;
class OMPExecutableDirective;
class OMPLoopDirective;
class VarDecl;
class OMPDeclareReductionDecl;
class IdentifierInfo;
namespace CodeGen {
class Address;
class CodeGenFunction;
class CodeGenModule;
/// A basic class for pre|post-action for advanced codegen sequence for OpenMP
/// region.
class PrePostActionTy {
public:
explicit PrePostActionTy() {}
virtual void Enter(CodeGenFunction &CGF) {}
virtual void Exit(CodeGenFunction &CGF) {}
virtual ~PrePostActionTy() {}
};
/// Class that provides a way to call the simple version of codegen for an
/// OpenMP region, or an advanced one with possible pre|post-actions in codegen.
class RegionCodeGenTy final {
intptr_t CodeGen;
typedef void (*CodeGenTy)(intptr_t, CodeGenFunction &, PrePostActionTy &);
CodeGenTy Callback;
mutable PrePostActionTy *PrePostAction;
RegionCodeGenTy() = delete;
RegionCodeGenTy &operator=(const RegionCodeGenTy &) = delete;
template <typename Callable>
static void CallbackFn(intptr_t CodeGen, CodeGenFunction &CGF,
PrePostActionTy &Action) {
return (*reinterpret_cast<Callable *>(CodeGen))(CGF, Action);
}
public:
template <typename Callable>
RegionCodeGenTy(
Callable &&CodeGen,
std::enable_if_t<!std::is_same<std::remove_reference_t<Callable>,
RegionCodeGenTy>::value> * = nullptr)
: CodeGen(reinterpret_cast<intptr_t>(&CodeGen)),
Callback(CallbackFn<std::remove_reference_t<Callable>>),
PrePostAction(nullptr) {}
void setAction(PrePostActionTy &Action) const { PrePostAction = &Action; }
void operator()(CodeGenFunction &CGF) const;
};
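/// Illustrative sketch (hypothetical usage): a RegionCodeGenTy is typically
/// built from a lambda and later invoked by the runtime emitters:
/// \code
///   auto &&CodeGen = [](CodeGenFunction &CGF, PrePostActionTy &Action) {
///     Action.Enter(CGF);
///     // ... emit statements for the region body ...
///   };
///   RegionCodeGenTy BodyGen(CodeGen);
///   BodyGen(CGF); // runs the callback, wrapped by any attached action
/// \endcode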
struct OMPTaskDataTy final {
SmallVector<const Expr *, 4> PrivateVars;
SmallVector<const Expr *, 4> PrivateCopies;
SmallVector<const Expr *, 4> FirstprivateVars;
SmallVector<const Expr *, 4> FirstprivateCopies;
SmallVector<const Expr *, 4> FirstprivateInits;
SmallVector<const Expr *, 4> LastprivateVars;
SmallVector<const Expr *, 4> LastprivateCopies;
SmallVector<const Expr *, 4> ReductionVars;
SmallVector<const Expr *, 4> ReductionCopies;
SmallVector<const Expr *, 4> ReductionOps;
SmallVector<std::pair<OpenMPDependClauseKind, const Expr *>, 4> Dependences;
llvm::PointerIntPair<llvm::Value *, 1, bool> Final;
llvm::PointerIntPair<llvm::Value *, 1, bool> Schedule;
llvm::PointerIntPair<llvm::Value *, 1, bool> Priority;
llvm::Value *Reductions = nullptr;
unsigned NumberOfParts = 0;
bool Tied = true;
bool Nogroup = false;
};
/// Class intended to support codegen of all kind of the reduction clauses.
class ReductionCodeGen {
private:
/// Data required for codegen of reduction clauses.
struct ReductionData {
/// Reference to the original shared item.
const Expr *Ref = nullptr;
/// Helper expression for generation of private copy.
const Expr *Private = nullptr;
/// Helper expression for generation reduction operation.
const Expr *ReductionOp = nullptr;
ReductionData(const Expr *Ref, const Expr *Private, const Expr *ReductionOp)
: Ref(Ref), Private(Private), ReductionOp(ReductionOp) {}
};
/// List of reduction-based clauses.
SmallVector<ReductionData, 4> ClausesData;
/// List of addresses of original shared variables/expressions.
SmallVector<std::pair<LValue, LValue>, 4> SharedAddresses;
/// Sizes of the reduction items in chars.
SmallVector<std::pair<llvm::Value *, llvm::Value *>, 4> Sizes;
/// Base declarations for the reduction items.
SmallVector<const VarDecl *, 4> BaseDecls;
/// Emits lvalue for shared expression.
LValue emitSharedLValue(CodeGenFunction &CGF, const Expr *E);
/// Emits upper bound for shared expression (if array section).
LValue emitSharedLValueUB(CodeGenFunction &CGF, const Expr *E);
/// Performs aggregate initialization.
/// \param N Number of reduction item in the common list.
/// \param PrivateAddr Address of the corresponding private item.
/// \param SharedLVal Address of the original shared variable.
/// \param DRD Declare reduction construct used for reduction item.
void emitAggregateInitialization(CodeGenFunction &CGF, unsigned N,
Address PrivateAddr, LValue SharedLVal,
const OMPDeclareReductionDecl *DRD);
public:
ReductionCodeGen(ArrayRef<const Expr *> Shareds,
ArrayRef<const Expr *> Privates,
ArrayRef<const Expr *> ReductionOps);
/// Emits lvalue for a reduction item.
/// \param N Number of the reduction item.
void emitSharedLValue(CodeGenFunction &CGF, unsigned N);
/// Emits the code for the variable-modified type, if required.
/// \param N Number of the reduction item.
void emitAggregateType(CodeGenFunction &CGF, unsigned N);
/// Emits the code for the variable-modified type, if required.
/// \param N Number of the reduction item.
/// \param Size Size of the type in chars.
void emitAggregateType(CodeGenFunction &CGF, unsigned N, llvm::Value *Size);
/// Performs initialization of the private copy for the reduction item.
/// \param N Number of the reduction item.
/// \param PrivateAddr Address of the corresponding private item.
/// \param DefaultInit Default initialization sequence that should be
/// performed if no reduction specific initialization is found.
/// \param SharedLVal Address of the original shared variable.
void
emitInitialization(CodeGenFunction &CGF, unsigned N, Address PrivateAddr,
LValue SharedLVal,
llvm::function_ref<bool(CodeGenFunction &)> DefaultInit);
/// Returns true if the private copy requires cleanups.
bool needCleanups(unsigned N);
/// Emits cleanup code for the reduction item.
/// \param N Number of the reduction item.
/// \param PrivateAddr Address of the corresponding private item.
void emitCleanups(CodeGenFunction &CGF, unsigned N, Address PrivateAddr);
/// Adjusts \p PrivateAddr for using instead of the original variable
/// address in normal operations.
/// \param N Number of the reduction item.
/// \param PrivateAddr Address of the corresponding private item.
Address adjustPrivateAddress(CodeGenFunction &CGF, unsigned N,
Address PrivateAddr);
/// Returns LValue for the reduction item.
LValue getSharedLValue(unsigned N) const { return SharedAddresses[N].first; }
/// Returns the size of the reduction item (in chars and total number of
/// elements in the item), or nullptr, if the size is a constant.
std::pair<llvm::Value *, llvm::Value *> getSizes(unsigned N) const {
return Sizes[N];
}
/// Returns the base declaration of the reduction item.
const VarDecl *getBaseDecl(unsigned N) const { return BaseDecls[N]; }
/// Returns the base declaration of the reduction item.
const Expr *getRefExpr(unsigned N) const { return ClausesData[N].Ref; }
/// Returns true if the initialization of the reduction item uses initializer
/// from declare reduction construct.
bool usesReductionInitializer(unsigned N) const;
};
class CGOpenMPRuntime {
public:
/// Allows disabling the automatic handling of functions used in target
/// regions as if they were marked as `omp declare target`.
class DisableAutoDeclareTargetRAII {
CodeGenModule &CGM;
bool SavedShouldMarkAsGlobal;
public:
DisableAutoDeclareTargetRAII(CodeGenModule &CGM);
~DisableAutoDeclareTargetRAII();
};
/// Manages list of nontemporal decls for the specified directive.
class NontemporalDeclsRAII {
CodeGenModule &CGM;
const bool NeedToPush;
public:
NontemporalDeclsRAII(CodeGenModule &CGM, const OMPLoopDirective &S);
~NontemporalDeclsRAII();
};
/// Maps the expression for the lastprivate variable to the global copy used
/// to store new value because original variables are not mapped in inner
/// parallel regions. Only private copies are captured, but we also need to
/// store the private copy at a shared address.
/// Also stores the expression for the private loop counter and its
/// threadprivate name.
struct LastprivateConditionalData {
llvm::MapVector<CanonicalDeclPtr<const Decl>, SmallString<16>>
DeclToUniqueName;
LValue IVLVal;
llvm::Function *Fn = nullptr;
bool Disabled = false;
};
/// Manages list of lastprivate conditional decls for the specified directive.
class LastprivateConditionalRAII {
enum class ActionToDo {
DoNotPush,
PushAsLastprivateConditional,
DisableLastprivateConditional,
};
CodeGenModule &CGM;
ActionToDo Action = ActionToDo::DoNotPush;
/// Check and try to disable analysis of inner regions for changes in
/// lastprivate conditional.
void tryToDisableInnerAnalysis(const OMPExecutableDirective &S,
llvm::DenseSet<CanonicalDeclPtr<const Decl>>
&NeedToAddForLPCsAsDisabled) const;
LastprivateConditionalRAII(CodeGenFunction &CGF,
const OMPExecutableDirective &S);
public:
explicit LastprivateConditionalRAII(CodeGenFunction &CGF,
const OMPExecutableDirective &S,
LValue IVLVal);
static LastprivateConditionalRAII disable(CodeGenFunction &CGF,
const OMPExecutableDirective &S);
~LastprivateConditionalRAII();
};
protected:
CodeGenModule &CGM;
StringRef FirstSeparator, Separator;
/// Constructor allowing to redefine the name separator for the variables.
explicit CGOpenMPRuntime(CodeGenModule &CGM, StringRef FirstSeparator,
StringRef Separator);
/// Creates offloading entry for the provided entry ID \a ID,
/// address \a Addr, size \a Size, and flags \a Flags.
virtual void createOffloadEntry(llvm::Constant *ID, llvm::Constant *Addr,
uint64_t Size, int32_t Flags,
llvm::GlobalValue::LinkageTypes Linkage);
/// Helper to emit outlined function for 'target' directive.
/// \param D Directive to emit.
/// \param ParentName Name of the function that encloses the target region.
/// \param OutlinedFn Outlined function value to be defined by this call.
/// \param OutlinedFnID Outlined function ID value to be defined by this call.
/// \param IsOffloadEntry True if the outlined function is an offload entry.
/// \param CodeGen Lambda codegen specific to an accelerator device.
/// An outlined function may not be an entry if, e.g. the if clause always
/// evaluates to false.
virtual void emitTargetOutlinedFunctionHelper(const OMPExecutableDirective &D,
StringRef ParentName,
llvm::Function *&OutlinedFn,
llvm::Constant *&OutlinedFnID,
bool IsOffloadEntry,
const RegionCodeGenTy &CodeGen);
/// Emits object of ident_t type with info for source location.
/// \param Flags Flags for OpenMP location.
///
llvm::Value *emitUpdateLocation(CodeGenFunction &CGF, SourceLocation Loc,
unsigned Flags = 0);
/// Returns pointer to ident_t type.
llvm::Type *getIdentTyPointerTy();
/// Gets thread id value for the current thread.
///
llvm::Value *getThreadID(CodeGenFunction &CGF, SourceLocation Loc);
/// Get the function name of an outlined region.
// The name can be customized depending on the target.
//
virtual StringRef getOutlinedHelperName() const { return ".omp_outlined."; }
/// Emits \p Callee function call with arguments \p Args with location \p Loc.
void emitCall(CodeGenFunction &CGF, SourceLocation Loc,
llvm::FunctionCallee Callee,
ArrayRef<llvm::Value *> Args = llvm::None) const;
/// Emits address of the word in a memory where current thread id is
/// stored.
virtual Address emitThreadIDAddress(CodeGenFunction &CGF, SourceLocation Loc);
void setLocThreadIdInsertPt(CodeGenFunction &CGF,
bool AtCurrentPoint = false);
void clearLocThreadIdInsertPt(CodeGenFunction &CGF);
/// Check if the default location must be constant.
/// Default is false to support OMPT/OMPD.
virtual bool isDefaultLocationConstant() const { return false; }
/// Returns additional flags that can be stored in reserved_2 field of the
/// default location.
virtual unsigned getDefaultLocationReserved2Flags() const { return 0; }
/// Tries to emit declare variant function for \p OldGD from \p NewGD.
/// \param OrigAddr LLVM IR value for \p OldGD.
/// \param IsForDefinition true, if requested emission for the definition of
/// \p OldGD.
/// \returns true, was able to emit a definition function for \p OldGD, which
/// points to \p NewGD.
virtual bool tryEmitDeclareVariant(const GlobalDecl &NewGD,
const GlobalDecl &OldGD,
llvm::GlobalValue *OrigAddr,
bool IsForDefinition);
/// Returns default flags for the barriers depending on the directive for
/// which this barrier is going to be emitted.
static unsigned getDefaultFlagsForBarriers(OpenMPDirectiveKind Kind);
/// Get the LLVM type for the critical name.
llvm::ArrayType *getKmpCriticalNameTy() const {return KmpCriticalNameTy;}
/// Returns corresponding lock object for the specified critical region
/// name. If the lock object does not exist it is created, otherwise the
/// reference to the existing copy is returned.
/// \param CriticalName Name of the critical region.
///
llvm::Value *getCriticalRegionLock(StringRef CriticalName);
private:
/// Default const ident_t object used for initialization of all other
/// ident_t objects.
llvm::Constant *DefaultOpenMPPSource = nullptr;
using FlagsTy = std::pair<unsigned, unsigned>;
/// Map of flags and corresponding default locations.
using OpenMPDefaultLocMapTy = llvm::DenseMap<FlagsTy, llvm::Value *>;
OpenMPDefaultLocMapTy OpenMPDefaultLocMap;
Address getOrCreateDefaultLocation(unsigned Flags);
QualType IdentQTy;
llvm::StructType *IdentTy = nullptr;
/// Map for SourceLocation and OpenMP runtime library debug locations.
typedef llvm::DenseMap<unsigned, llvm::Value *> OpenMPDebugLocMapTy;
OpenMPDebugLocMapTy OpenMPDebugLocMap;
/// The type for a microtask which gets passed to __kmpc_fork_call().
/// Original representation is:
/// typedef void (kmpc_micro)(kmp_int32 global_tid, kmp_int32 bound_tid,...);
llvm::FunctionType *Kmpc_MicroTy = nullptr;
/// Stores debug location and ThreadID for the function.
struct DebugLocThreadIdTy {
llvm::Value *DebugLoc;
llvm::Value *ThreadID;
/// Insert point for the service instructions.
llvm::AssertingVH<llvm::Instruction> ServiceInsertPt = nullptr;
};
/// Map of local debug location, ThreadId and functions.
typedef llvm::DenseMap<llvm::Function *, DebugLocThreadIdTy>
OpenMPLocThreadIDMapTy;
OpenMPLocThreadIDMapTy OpenMPLocThreadIDMap;
/// Map of UDRs and corresponding combiner/initializer.
typedef llvm::DenseMap<const OMPDeclareReductionDecl *,
std::pair<llvm::Function *, llvm::Function *>>
UDRMapTy;
UDRMapTy UDRMap;
/// Map of functions and locally defined UDRs.
typedef llvm::DenseMap<llvm::Function *,
SmallVector<const OMPDeclareReductionDecl *, 4>>
FunctionUDRMapTy;
FunctionUDRMapTy FunctionUDRMap;
/// Map from the user-defined mapper declaration to its corresponding
/// functions.
llvm::DenseMap<const OMPDeclareMapperDecl *, llvm::Function *> UDMMap;
/// Map of functions and their local user-defined mappers.
using FunctionUDMMapTy =
llvm::DenseMap<llvm::Function *,
SmallVector<const OMPDeclareMapperDecl *, 4>>;
FunctionUDMMapTy FunctionUDMMap;
/// Maps local variables marked as lastprivate conditional to their internal
/// types.
llvm::DenseMap<llvm::Function *,
llvm::DenseMap<CanonicalDeclPtr<const Decl>,
std::tuple<QualType, const FieldDecl *,
const FieldDecl *, LValue>>>
LastprivateConditionalToTypes;
/// Type kmp_critical_name, originally defined as typedef kmp_int32
/// kmp_critical_name[8];
llvm::ArrayType *KmpCriticalNameTy;
/// An ordered map of auto-generated variables to their unique names.
/// It stores variables with the following names: 1) ".gomp_critical_user_" +
/// <critical_section_name> + ".var" for "omp critical" directives; 2)
/// <mangled_name_for_global_var> + ".cache." for the cache of threadprivate
/// variables.
llvm::StringMap<llvm::AssertingVH<llvm::Constant>, llvm::BumpPtrAllocator>
InternalVars;
/// Type typedef kmp_int32 (* kmp_routine_entry_t)(kmp_int32, void *);
llvm::Type *KmpRoutineEntryPtrTy = nullptr;
QualType KmpRoutineEntryPtrQTy;
/// Type typedef struct kmp_task {
/// void * shareds; /**< pointer to block of pointers to
/// shared vars */
/// kmp_routine_entry_t routine; /**< pointer to routine to call for
/// executing task */
/// kmp_int32 part_id; /**< part id for the task */
/// kmp_routine_entry_t destructors; /* pointer to function to invoke
/// destructors of firstprivate C++ objects */
/// } kmp_task_t;
QualType KmpTaskTQTy;
/// Saved kmp_task_t for task directive.
QualType SavedKmpTaskTQTy;
/// Saved kmp_task_t for taskloop-based directive.
QualType SavedKmpTaskloopTQTy;
/// Type typedef struct kmp_depend_info {
/// kmp_intptr_t base_addr;
/// size_t len;
/// struct {
/// bool in:1;
/// bool out:1;
/// } flags;
/// } kmp_depend_info_t;
QualType KmpDependInfoTy;
/// struct kmp_dim { // loop bounds info cast to kmp_int64
/// kmp_int64 lo; // lower
/// kmp_int64 up; // upper
/// kmp_int64 st; // stride
/// };
QualType KmpDimTy;
/// Type struct __tgt_offload_entry{
/// void *addr; // Pointer to the offload entry info.
/// // (function or global)
/// char *name; // Name of the function or global.
/// size_t size; // Size of the entry info (0 if it is a function).
/// int32_t flags;
/// int32_t reserved;
/// };
QualType TgtOffloadEntryQTy;
/// Entity that registers the offloading constants that were emitted so
/// far.
class OffloadEntriesInfoManagerTy {
CodeGenModule &CGM;
/// Number of entries registered so far.
unsigned OffloadingEntriesNum = 0;
public:
/// Base class of the entries info.
class OffloadEntryInfo {
public:
/// Kind of a given entry.
enum OffloadingEntryInfoKinds : unsigned {
/// Entry is a target region.
OffloadingEntryInfoTargetRegion = 0,
/// Entry is a declare target variable.
OffloadingEntryInfoDeviceGlobalVar = 1,
/// Invalid entry info.
OffloadingEntryInfoInvalid = ~0u
};
protected:
OffloadEntryInfo() = delete;
explicit OffloadEntryInfo(OffloadingEntryInfoKinds Kind) : Kind(Kind) {}
explicit OffloadEntryInfo(OffloadingEntryInfoKinds Kind, unsigned Order,
uint32_t Flags)
: Flags(Flags), Order(Order), Kind(Kind) {}
~OffloadEntryInfo() = default;
public:
bool isValid() const { return Order != ~0u; }
unsigned getOrder() const { return Order; }
OffloadingEntryInfoKinds getKind() const { return Kind; }
uint32_t getFlags() const { return Flags; }
void setFlags(uint32_t NewFlags) { Flags = NewFlags; }
llvm::Constant *getAddress() const {
return cast_or_null<llvm::Constant>(Addr);
}
void setAddress(llvm::Constant *V) {
assert(!Addr.pointsToAliveValue() && "Address has been set before!");
Addr = V;
}
static bool classof(const OffloadEntryInfo *Info) { return true; }
private:
/// Address of the entity that has to be mapped for offloading.
llvm::WeakTrackingVH Addr;
/// Flags associated with the device global.
uint32_t Flags = 0u;
/// Order this entry was emitted.
unsigned Order = ~0u;
OffloadingEntryInfoKinds Kind = OffloadingEntryInfoInvalid;
};
/// Return true if there are no entries defined.
bool empty() const;
/// Return number of entries defined so far.
unsigned size() const { return OffloadingEntriesNum; }
OffloadEntriesInfoManagerTy(CodeGenModule &CGM) : CGM(CGM) {}
//
// Target region entries related.
//
/// Kind of the target registry entry.
enum OMPTargetRegionEntryKind : uint32_t {
/// Mark the entry as target region.
OMPTargetRegionEntryTargetRegion = 0x0,
/// Mark the entry as a global constructor.
OMPTargetRegionEntryCtor = 0x02,
/// Mark the entry as a global destructor.
OMPTargetRegionEntryDtor = 0x04,
};
/// Target region entries info.
class OffloadEntryInfoTargetRegion final : public OffloadEntryInfo {
/// Address that can be used as the ID of the entry.
llvm::Constant *ID = nullptr;
public:
OffloadEntryInfoTargetRegion()
: OffloadEntryInfo(OffloadingEntryInfoTargetRegion) {}
explicit OffloadEntryInfoTargetRegion(unsigned Order,
llvm::Constant *Addr,
llvm::Constant *ID,
OMPTargetRegionEntryKind Flags)
: OffloadEntryInfo(OffloadingEntryInfoTargetRegion, Order, Flags),
ID(ID) {
setAddress(Addr);
}
llvm::Constant *getID() const { return ID; }
void setID(llvm::Constant *V) {
assert(!ID && "ID has been set before!");
ID = V;
}
static bool classof(const OffloadEntryInfo *Info) {
return Info->getKind() == OffloadingEntryInfoTargetRegion;
}
};
/// Initialize target region entry.
void initializeTargetRegionEntryInfo(unsigned DeviceID, unsigned FileID,
StringRef ParentName, unsigned LineNum,
unsigned Order);
/// Register target region entry.
void registerTargetRegionEntryInfo(unsigned DeviceID, unsigned FileID,
StringRef ParentName, unsigned LineNum,
llvm::Constant *Addr, llvm::Constant *ID,
OMPTargetRegionEntryKind Flags);
/// Return true if a target region entry with the provided information
/// exists.
bool hasTargetRegionEntryInfo(unsigned DeviceID, unsigned FileID,
StringRef ParentName, unsigned LineNum) const;
/// Applies action \a Action on all registered entries.
typedef llvm::function_ref<void(unsigned, unsigned, StringRef, unsigned,
const OffloadEntryInfoTargetRegion &)>
OffloadTargetRegionEntryInfoActTy;
void actOnTargetRegionEntriesInfo(
const OffloadTargetRegionEntryInfoActTy &Action);
//
// Device global variable entries related.
//
/// Kind of the global variable entry.
enum OMPTargetGlobalVarEntryKind : uint32_t {
/// Mark the entry as a 'declare target to' variable.
OMPTargetGlobalVarEntryTo = 0x0,
/// Mark the entry as a 'declare target link' variable.
OMPTargetGlobalVarEntryLink = 0x1,
};
/// Device global variable entries info.
class OffloadEntryInfoDeviceGlobalVar final : public OffloadEntryInfo {
/// Size of the global variable.
CharUnits VarSize;
llvm::GlobalValue::LinkageTypes Linkage;
public:
OffloadEntryInfoDeviceGlobalVar()
: OffloadEntryInfo(OffloadingEntryInfoDeviceGlobalVar) {}
explicit OffloadEntryInfoDeviceGlobalVar(unsigned Order,
OMPTargetGlobalVarEntryKind Flags)
: OffloadEntryInfo(OffloadingEntryInfoDeviceGlobalVar, Order, Flags) {}
explicit OffloadEntryInfoDeviceGlobalVar(
unsigned Order, llvm::Constant *Addr, CharUnits VarSize,
OMPTargetGlobalVarEntryKind Flags,
llvm::GlobalValue::LinkageTypes Linkage)
: OffloadEntryInfo(OffloadingEntryInfoDeviceGlobalVar, Order, Flags),
VarSize(VarSize), Linkage(Linkage) {
setAddress(Addr);
}
CharUnits getVarSize() const { return VarSize; }
void setVarSize(CharUnits Size) { VarSize = Size; }
llvm::GlobalValue::LinkageTypes getLinkage() const { return Linkage; }
void setLinkage(llvm::GlobalValue::LinkageTypes LT) { Linkage = LT; }
static bool classof(const OffloadEntryInfo *Info) {
return Info->getKind() == OffloadingEntryInfoDeviceGlobalVar;
}
};
/// Initialize device global variable entry.
void initializeDeviceGlobalVarEntryInfo(StringRef Name,
OMPTargetGlobalVarEntryKind Flags,
unsigned Order);
/// Register device global variable entry.
void
registerDeviceGlobalVarEntryInfo(StringRef VarName, llvm::Constant *Addr,
CharUnits VarSize,
OMPTargetGlobalVarEntryKind Flags,
llvm::GlobalValue::LinkageTypes Linkage);
/// Checks if the variable with the given name has been registered already.
bool hasDeviceGlobalVarEntryInfo(StringRef VarName) const {
return OffloadEntriesDeviceGlobalVar.count(VarName) > 0;
}
/// Applies action \a Action on all registered entries.
typedef llvm::function_ref<void(StringRef,
const OffloadEntryInfoDeviceGlobalVar &)>
OffloadDeviceGlobalVarEntryInfoActTy;
void actOnDeviceGlobalVarEntriesInfo(
const OffloadDeviceGlobalVarEntryInfoActTy &Action);
private:
// Storage for target region entries kind. The storage is to be indexed by
// device ID, file ID, parent function name and line number.
typedef llvm::DenseMap<unsigned, OffloadEntryInfoTargetRegion>
OffloadEntriesTargetRegionPerLine;
typedef llvm::StringMap<OffloadEntriesTargetRegionPerLine>
OffloadEntriesTargetRegionPerParentName;
typedef llvm::DenseMap<unsigned, OffloadEntriesTargetRegionPerParentName>
OffloadEntriesTargetRegionPerFile;
typedef llvm::DenseMap<unsigned, OffloadEntriesTargetRegionPerFile>
OffloadEntriesTargetRegionPerDevice;
typedef OffloadEntriesTargetRegionPerDevice OffloadEntriesTargetRegionTy;
OffloadEntriesTargetRegionTy OffloadEntriesTargetRegion;
/// Storage for device global variable entries kind. The storage is to be
/// indexed by mangled name.
typedef llvm::StringMap<OffloadEntryInfoDeviceGlobalVar>
OffloadEntriesDeviceGlobalVarTy;
OffloadEntriesDeviceGlobalVarTy OffloadEntriesDeviceGlobalVar;
};
OffloadEntriesInfoManagerTy OffloadEntriesInfoManager;
bool ShouldMarkAsGlobal = true;
/// List of the emitted declarations.
llvm::DenseSet<CanonicalDeclPtr<const Decl>> AlreadyEmittedTargetDecls;
/// List of the global variables with their addresses that should not be
/// emitted for the target.
llvm::StringMap<llvm::WeakTrackingVH> EmittedNonTargetVariables;
/// List of variables that can become declare target implicitly and, thus,
/// must be emitted.
llvm::SmallDenseSet<const VarDecl *> DeferredGlobalVariables;
/// Mapping of the original functions to their variants and original global
/// decl.
llvm::MapVector<CanonicalDeclPtr<const FunctionDecl>,
std::pair<GlobalDecl, GlobalDecl>>
DeferredVariantFunction;
using NontemporalDeclsSet = llvm::SmallDenseSet<CanonicalDeclPtr<const Decl>>;
/// Stack for list of declarations in current context marked as nontemporal.
/// The set is the union of all current stack elements.
llvm::SmallVector<NontemporalDeclsSet, 4> NontemporalDeclsStack;
/// Stack for list of addresses of declarations in current context marked as
/// lastprivate conditional. The set is the union of all current stack
/// elements.
llvm::SmallVector<LastprivateConditionalData, 4> LastprivateConditionalStack;
/// Flag for keeping track of whether a requires unified_shared_memory
/// directive is present.
bool HasRequiresUnifiedSharedMemory = false;
/// Atomic ordering from the omp requires directive.
llvm::AtomicOrdering RequiresAtomicOrdering = llvm::AtomicOrdering::Monotonic;
/// Flag for keeping track of whether a target region has been emitted.
bool HasEmittedTargetRegion = false;
/// Flag for keeping track of whether a device routine has been emitted.
bool HasEmittedDeclareTargetRegion = false;
/// Loads all the offload entries information from the host IR
/// metadata.
void loadOffloadInfoMetadata();
/// Returns __tgt_offload_entry type.
QualType getTgtOffloadEntryQTy();
/// Start scanning from statement \a S and emit all target regions
/// found along the way.
/// \param S Starting statement.
/// \param ParentName Name of the function declaration that is being scanned.
void scanForTargetRegionsFunctions(const Stmt *S, StringRef ParentName);
/// Build type kmp_routine_entry_t (if not built yet).
void emitKmpRoutineEntryT(QualType KmpInt32Ty);
/// Returns pointer to kmpc_micro type.
llvm::Type *getKmpc_MicroPointerTy();
/// Returns specified OpenMP runtime function.
/// \param Function OpenMP runtime function.
/// \return Specified function.
llvm::FunctionCallee createRuntimeFunction(unsigned Function);
/// Returns __kmpc_for_static_init_* runtime function for the specified
/// size \a IVSize and sign \a IVSigned.
llvm::FunctionCallee createForStaticInitFunction(unsigned IVSize,
bool IVSigned);
/// Returns __kmpc_dispatch_init_* runtime function for the specified
/// size \a IVSize and sign \a IVSigned.
llvm::FunctionCallee createDispatchInitFunction(unsigned IVSize,
bool IVSigned);
/// Returns __kmpc_dispatch_next_* runtime function for the specified
/// size \a IVSize and sign \a IVSigned.
llvm::FunctionCallee createDispatchNextFunction(unsigned IVSize,
bool IVSigned);
/// Returns __kmpc_dispatch_fini_* runtime function for the specified
/// size \a IVSize and sign \a IVSigned.
llvm::FunctionCallee createDispatchFiniFunction(unsigned IVSize,
bool IVSigned);
/// If the specified mangled name is not in the module, create and
/// return threadprivate cache object. This object is a pointer's worth of
/// storage that's reserved for use by the OpenMP runtime.
/// \param VD Threadprivate variable.
/// \return Cache variable for the specified threadprivate.
llvm::Constant *getOrCreateThreadPrivateCache(const VarDecl *VD);
/// Gets (if a variable with the given name already exists) or creates an
/// internal global variable with the specified Name. The created variable
/// has CommonLinkage by default and is initialized with a null value.
/// \param Ty Type of the global variable. If the variable already exists,
/// the type must be the same.
/// \param Name Name of the variable.
llvm::Constant *getOrCreateInternalVariable(llvm::Type *Ty,
const llvm::Twine &Name,
unsigned AddressSpace = 0);
/// Set of threadprivate variables with the generated initializer.
llvm::StringSet<> ThreadPrivateWithDefinition;
/// Set of declare target variables with the generated initializer.
llvm::StringSet<> DeclareTargetWithDefinition;
/// Emits initialization code for the threadprivate variables.
/// \param VDAddr Address of the global variable \a VD.
/// \param Ctor Pointer to a global init function for \a VD.
/// \param CopyCtor Pointer to a global copy function for \a VD.
/// \param Dtor Pointer to a global destructor function for \a VD.
/// \param Loc Location of threadprivate declaration.
void emitThreadPrivateVarInit(CodeGenFunction &CGF, Address VDAddr,
llvm::Value *Ctor, llvm::Value *CopyCtor,
llvm::Value *Dtor, SourceLocation Loc);
/// Emit the array initialization or deletion portion for user-defined mapper
/// code generation.
void emitUDMapperArrayInitOrDel(CodeGenFunction &MapperCGF,
llvm::Value *Handle, llvm::Value *BasePtr,
llvm::Value *Ptr, llvm::Value *Size,
llvm::Value *MapType, CharUnits ElementSize,
llvm::BasicBlock *ExitBB, bool IsInit);
struct TaskResultTy {
llvm::Value *NewTask = nullptr;
llvm::Function *TaskEntry = nullptr;
llvm::Value *NewTaskNewTaskTTy = nullptr;
LValue TDBase;
const RecordDecl *KmpTaskTQTyRD = nullptr;
llvm::Value *TaskDupFn = nullptr;
};
/// Emit task region for the task directive. The task region is emitted in
/// several steps:
/// 1. Emit a call to kmp_task_t *__kmpc_omp_task_alloc(ident_t *, kmp_int32
/// gtid, kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds,
/// kmp_routine_entry_t *task_entry). Here task_entry is a pointer to the
/// function:
/// kmp_int32 .omp_task_entry.(kmp_int32 gtid, kmp_task_t *tt) {
/// TaskFunction(gtid, tt->part_id, tt->shareds);
/// return 0;
/// }
/// 2. Copy a list of shared variables to field shareds of the resulting
/// structure kmp_task_t returned by the previous call (if any).
/// 3. Copy a pointer to the destructors function to the destructors field of
/// the resulting structure kmp_task_t.
/// \param D Current task directive.
/// \param TaskFunction An LLVM function with type void (*)(i32 /*gtid*/, i32
/// /*part_id*/, captured_struct */*__context*/);
/// \param SharedsTy A type which contains references to the shared variables.
/// \param Shareds Context with the list of shared variables from the \p
/// TaskFunction.
/// \param Data Additional data for task generation like tiedness, final
/// state, list of privates etc.
TaskResultTy emitTaskInit(CodeGenFunction &CGF, SourceLocation Loc,
const OMPExecutableDirective &D,
llvm::Function *TaskFunction, QualType SharedsTy,
Address Shareds, const OMPTaskDataTy &Data);
/// Returns default address space for the constant firstprivates, 0 by
/// default.
virtual unsigned getDefaultFirstprivateAddressSpace() const { return 0; }
/// Emit code that pushes the trip count of loops associated with constructs
/// 'target teams distribute' and 'teams distribute parallel for'.
/// \param SizeEmitter Emits the int64 value for the number of iterations of
/// the associated loop.
void emitTargetNumIterationsCall(
CodeGenFunction &CGF, const OMPExecutableDirective &D,
llvm::Value *DeviceID,
llvm::function_ref<llvm::Value *(CodeGenFunction &CGF,
const OMPLoopDirective &D)>
SizeEmitter);
/// Emit update for lastprivate conditional data.
void emitLastprivateConditionalUpdate(CodeGenFunction &CGF, LValue IVLVal,
StringRef UniqueDeclName, LValue LVal,
SourceLocation Loc);
/// Returns the number of the elements and the address of the depobj
/// dependency array.
/// \return Number of elements in depobj array and the pointer to the array of
/// dependencies.
std::pair<llvm::Value *, LValue> getDepobjElements(CodeGenFunction &CGF,
LValue DepobjLVal,
SourceLocation Loc);
public:
explicit CGOpenMPRuntime(CodeGenModule &CGM)
: CGOpenMPRuntime(CGM, ".", ".") {}
virtual ~CGOpenMPRuntime() {}
virtual void clear();
/// Emits code for the OpenMP 'if' clause using the specified \a ThenGen and
/// \a ElseGen code generation functions. Here is the logic:
/// if (Cond) {
/// ThenGen();
/// } else {
/// ElseGen();
/// }
void emitIfClause(CodeGenFunction &CGF, const Expr *Cond,
const RegionCodeGenTy &ThenGen,
const RegionCodeGenTy &ElseGen);
/// Checks if the \p Body is the \a CompoundStmt and returns its child
/// statement iff there is only one that is not evaluatable at compile
/// time.
static const Stmt *getSingleCompoundChild(ASTContext &Ctx, const Stmt *Body);
/// Get the platform-specific name separator.
std::string getName(ArrayRef<StringRef> Parts) const;
/// Emit code for the specified user defined reduction construct.
virtual void emitUserDefinedReduction(CodeGenFunction *CGF,
const OMPDeclareReductionDecl *D);
/// Get combiner/initializer for the specified user-defined reduction, if any.
virtual std::pair<llvm::Function *, llvm::Function *>
getUserDefinedReduction(const OMPDeclareReductionDecl *D);
/// Emit the function for the user defined mapper construct.
void emitUserDefinedMapper(const OMPDeclareMapperDecl *D,
CodeGenFunction *CGF = nullptr);
/// Emits outlined function for the specified OpenMP parallel directive
/// \a D. This outlined function has type void(*)(kmp_int32 *ThreadID,
/// kmp_int32 BoundID, struct context_vars*).
/// \param D OpenMP directive.
/// \param ThreadIDVar Variable for thread id in the current OpenMP region.
/// \param InnermostKind Kind of innermost directive (for simple directives it
/// is a directive itself, for combined - its innermost directive).
/// \param CodeGen Code generation sequence for the \a D directive.
virtual llvm::Function *emitParallelOutlinedFunction(
const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen);
/// Emits outlined function for the specified OpenMP teams directive
/// \a D. This outlined function has type void(*)(kmp_int32 *ThreadID,
/// kmp_int32 BoundID, struct context_vars*).
/// \param D OpenMP directive.
/// \param ThreadIDVar Variable for thread id in the current OpenMP region.
/// \param InnermostKind Kind of innermost directive (for simple directives it
/// is a directive itself, for combined - its innermost directive).
/// \param CodeGen Code generation sequence for the \a D directive.
virtual llvm::Function *emitTeamsOutlinedFunction(
const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen);
/// Emits outlined function for the OpenMP task directive \a D. This
/// outlined function has type void(*)(kmp_int32 ThreadID, struct task_t*
/// TaskT).
/// \param D OpenMP directive.
/// \param ThreadIDVar Variable for thread id in the current OpenMP region.
/// \param PartIDVar Variable for partition id in the current OpenMP untied
/// task region.
/// \param TaskTVar Variable for task_t argument.
/// \param InnermostKind Kind of innermost directive (for simple directives it
/// is a directive itself, for combined - its innermost directive).
/// \param CodeGen Code generation sequence for the \a D directive.
/// \param Tied true if task is generated for tied task, false otherwise.
/// \param NumberOfParts Number of parts in untied task. Ignored for tied
/// tasks.
///
virtual llvm::Function *emitTaskOutlinedFunction(
const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
const VarDecl *PartIDVar, const VarDecl *TaskTVar,
OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen,
bool Tied, unsigned &NumberOfParts);
/// Cleans up references to the objects in finished function.
///
virtual void functionFinished(CodeGenFunction &CGF);
/// Emits code for a parallel or serial call of the \a OutlinedFn with
/// variables captured in a record whose address is stored in \a
/// CapturedStruct.
/// \param OutlinedFn Outlined function to be run in parallel threads. Type of
/// this function is void(*)(kmp_int32 *, kmp_int32, struct context_vars*).
/// \param CapturedVars A pointer to the record with the references to
/// variables used in \a OutlinedFn function.
/// \param IfCond Condition in the associated 'if' clause, if it was
/// specified, nullptr otherwise.
///
virtual void emitParallelCall(CodeGenFunction &CGF, SourceLocation Loc,
llvm::Function *OutlinedFn,
ArrayRef<llvm::Value *> CapturedVars,
const Expr *IfCond);
/// Emits a critical region.
/// \param CriticalName Name of the critical region.
/// \param CriticalOpGen Generator for the statement associated with the given
/// critical region.
/// \param Hint Value of the 'hint' clause (optional).
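///
/// For illustration, a minimal sketch of the runtime pattern this typically
/// lowers to (hint-less form; the exact lowering is runtime-specific):
/// \code
/// __kmpc_critical(&loc, global_tid, &.gomp_critical_user_<name>.var);
/// <critical-region-body>;
/// __kmpc_end_critical(&loc, global_tid, &.gomp_critical_user_<name>.var);
/// \endcode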
virtual void emitCriticalRegion(CodeGenFunction &CGF, StringRef CriticalName,
const RegionCodeGenTy &CriticalOpGen,
SourceLocation Loc,
const Expr *Hint = nullptr);
/// Emits a master region.
/// \param MasterOpGen Generator for the statement associated with the given
/// master region.
virtual void emitMasterRegion(CodeGenFunction &CGF,
const RegionCodeGenTy &MasterOpGen,
SourceLocation Loc);
/// Emits code for a taskyield directive.
virtual void emitTaskyieldCall(CodeGenFunction &CGF, SourceLocation Loc);
/// Emit a taskgroup region.
/// \param TaskgroupOpGen Generator for the statement associated with the
/// given taskgroup region.
virtual void emitTaskgroupRegion(CodeGenFunction &CGF,
const RegionCodeGenTy &TaskgroupOpGen,
SourceLocation Loc);
/// Emits a single region.
/// \param SingleOpGen Generator for the statement associated with the given
/// single region.
virtual void emitSingleRegion(CodeGenFunction &CGF,
const RegionCodeGenTy &SingleOpGen,
SourceLocation Loc,
ArrayRef<const Expr *> CopyprivateVars,
ArrayRef<const Expr *> DestExprs,
ArrayRef<const Expr *> SrcExprs,
ArrayRef<const Expr *> AssignmentOps);
/// Emit an ordered region.
/// \param OrderedOpGen Generator for the statement associated with the given
/// ordered region.
virtual void emitOrderedRegion(CodeGenFunction &CGF,
const RegionCodeGenTy &OrderedOpGen,
SourceLocation Loc, bool IsThreads);
/// Emit an implicit/explicit barrier for OpenMP threads.
/// \param Kind Directive for which this implicit barrier call must be
/// generated. Must be OMPD_barrier for explicit barrier generation.
/// \param EmitChecks true if checks for cancellation barriers need to be
/// emitted.
/// \param ForceSimpleCall true if a simple barrier call must be emitted,
/// false if the runtime class decides which one to emit (simple or with
/// cancellation checks).
///
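/// For illustration, a sketch of the two emitted forms (assuming the usual
/// __kmpc entry points; a sketch, not the definitive lowering):
/// \code
/// __kmpc_barrier(&loc, global_tid);             // simple barrier
/// if (__kmpc_cancel_barrier(&loc, global_tid))  // barrier with checks
///   goto cancellation_exit;                     // region was cancelled
/// \endcode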
virtual void emitBarrierCall(CodeGenFunction &CGF, SourceLocation Loc,
OpenMPDirectiveKind Kind,
bool EmitChecks = true,
bool ForceSimpleCall = false);
/// Check if the specified \a ScheduleKind is static non-chunked.
/// This kind of worksharing directive is emitted without outer loop.
/// \param ScheduleKind Schedule kind specified in the 'schedule' clause.
/// \param Chunked True if chunk is specified in the clause.
///
virtual bool isStaticNonchunked(OpenMPScheduleClauseKind ScheduleKind,
bool Chunked) const;
/// Check if the specified \a ScheduleKind is static non-chunked.
/// This kind of distribute directive is emitted without outer loop.
/// \param ScheduleKind Schedule kind specified in the 'dist_schedule' clause.
/// \param Chunked True if chunk is specified in the clause.
///
virtual bool isStaticNonchunked(OpenMPDistScheduleClauseKind ScheduleKind,
bool Chunked) const;
/// Check if the specified \a ScheduleKind is static chunked.
/// \param ScheduleKind Schedule kind specified in the 'schedule' clause.
/// \param Chunked True if chunk is specified in the clause.
///
virtual bool isStaticChunked(OpenMPScheduleClauseKind ScheduleKind,
bool Chunked) const;
/// Check if the specified \a ScheduleKind is static chunked.
/// \param ScheduleKind Schedule kind specified in the 'dist_schedule' clause.
/// \param Chunked True if chunk is specified in the clause.
///
virtual bool isStaticChunked(OpenMPDistScheduleClauseKind ScheduleKind,
bool Chunked) const;
/// Check if the specified \a ScheduleKind is dynamic.
/// This kind of worksharing directive is emitted without outer loop.
/// \param ScheduleKind Schedule Kind specified in the 'schedule' clause.
///
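/// For example, a source-level sketch of how 'schedule' clauses map onto
/// this family of predicates (illustration only, not emitted code):
/// \code
/// #pragma omp for schedule(static)     // static non-chunked
/// #pragma omp for schedule(static, 4)  // static chunked
/// #pragma omp for schedule(dynamic)    // dynamic
/// \endcode
///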
virtual bool isDynamic(OpenMPScheduleClauseKind ScheduleKind) const;
/// Struct with the values to be passed to the dispatch runtime function.
struct DispatchRTInput {
/// Loop lower bound
llvm::Value *LB = nullptr;
/// Loop upper bound
llvm::Value *UB = nullptr;
/// Chunk size specified using 'schedule' clause (nullptr if chunk
/// was not specified)
llvm::Value *Chunk = nullptr;
DispatchRTInput() = default;
DispatchRTInput(llvm::Value *LB, llvm::Value *UB, llvm::Value *Chunk)
: LB(LB), UB(UB), Chunk(Chunk) {}
};
/// Call the appropriate runtime routine to initialize it before the start
/// of the loop.
/// This is used for non-static schedule types and when the ordered
/// clause is present on the loop construct.
/// Depending on the loop schedule, it is necessary to call some runtime
/// routine before start of the OpenMP loop to get the loop upper / lower
/// bounds \a LB and \a UB and stride \a ST.
///
/// \param CGF Reference to current CodeGenFunction.
/// \param Loc Clang source location.
/// \param ScheduleKind Schedule kind, specified by the 'schedule' clause.
/// \param IVSize Size of the iteration variable in bits.
/// \param IVSigned Sign of the iteration variable.
/// \param Ordered true if loop is ordered, false otherwise.
/// \param DispatchValues struct containing llvm values for lower bound, upper
/// bound, and chunk expression.
/// For the default (nullptr) value, a chunk of 1 will be used.
///
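/// For illustration, a sketch of the emitted initialization for a 32-bit
/// signed induction variable (the schedule constant shown is a
/// runtime-internal name; treat this as an assumption-laden sketch):
/// \code
/// __kmpc_dispatch_init_4(&loc, gtid, kmp_sch_dynamic_chunked,
///                        lb, ub, st, chunk);
/// \endcode
///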
virtual void emitForDispatchInit(CodeGenFunction &CGF, SourceLocation Loc,
const OpenMPScheduleTy &ScheduleKind,
unsigned IVSize, bool IVSigned, bool Ordered,
const DispatchRTInput &DispatchValues);
/// Struct with the values to be passed to the static runtime function
struct StaticRTInput {
/// Size of the iteration variable in bits.
unsigned IVSize = 0;
/// Sign of the iteration variable.
bool IVSigned = false;
/// true if loop is ordered, false otherwise.
bool Ordered = false;
/// Address of the output variable in which the flag of the last iteration
/// is returned.
Address IL = Address::invalid();
/// Address of the output variable in which the lower iteration number is
/// returned.
Address LB = Address::invalid();
/// Address of the output variable in which the upper iteration number is
/// returned.
Address UB = Address::invalid();
/// Address of the output variable in which the stride value is returned;
/// necessary to generate the static_chunked scheduled loop.
Address ST = Address::invalid();
/// Value of the chunk for the static_chunked scheduled loop. For the
/// default (nullptr) value, a chunk of 1 will be used.
llvm::Value *Chunk = nullptr;
StaticRTInput(unsigned IVSize, bool IVSigned, bool Ordered, Address IL,
Address LB, Address UB, Address ST,
llvm::Value *Chunk = nullptr)
: IVSize(IVSize), IVSigned(IVSigned), Ordered(Ordered), IL(IL), LB(LB),
UB(UB), ST(ST), Chunk(Chunk) {}
};
/// Call the appropriate runtime routine to initialize it before the start
/// of the loop.
///
/// This is used only in the case of a static schedule, when the user did
/// not specify an ordered clause on the loop construct.
/// Depending on the loop schedule, it is necessary to call some runtime
/// routine before start of the OpenMP loop to get the loop upper / lower
/// bounds LB and UB and stride ST.
///
/// \param CGF Reference to current CodeGenFunction.
/// \param Loc Clang source location.
/// \param DKind Kind of the directive.
/// \param ScheduleKind Schedule kind, specified by the 'schedule' clause.
/// \param Values Input arguments for the construct.
///
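/// For illustration, a sketch of the usual lowering for a 32-bit signed
/// induction variable (a sketch, not the definitive sequence):
/// \code
/// __kmpc_for_static_init_4(&loc, gtid, schedtype, &lastiter,
///                          &lb, &ub, &st, incr, chunk);
/// for (i = lb; i <= ub; i += incr)
///   body(i);
/// \endcode
///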
virtual void emitForStaticInit(CodeGenFunction &CGF, SourceLocation Loc,
OpenMPDirectiveKind DKind,
const OpenMPScheduleTy &ScheduleKind,
const StaticRTInput &Values);
/// Call the appropriate runtime routine to initialize it before the start
/// of the distribute loop.
///
/// \param CGF Reference to current CodeGenFunction.
/// \param Loc Clang source location.
/// \param SchedKind Schedule kind, specified by the 'dist_schedule' clause.
/// \param Values Input arguments for the construct.
///
virtual void emitDistributeStaticInit(CodeGenFunction &CGF,
SourceLocation Loc,
OpenMPDistScheduleClauseKind SchedKind,
const StaticRTInput &Values);
/// Call the appropriate runtime routine to notify that we finished an
/// iteration of the ordered loop with dynamic scheduling.
///
/// \param CGF Reference to current CodeGenFunction.
/// \param Loc Clang source location.
/// \param IVSize Size of the iteration variable in bits.
/// \param IVSigned Sign of the iteration variable.
///
virtual void emitForOrderedIterationEnd(CodeGenFunction &CGF,
SourceLocation Loc, unsigned IVSize,
bool IVSigned);
/// Call the appropriate runtime routine to notify that we finished
/// all the work with the current loop.
///
/// \param CGF Reference to current CodeGenFunction.
/// \param Loc Clang source location.
/// \param DKind Kind of the directive for which the static finish is emitted.
///
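/// For illustration, this typically reduces to a single runtime call (a
/// sketch):
/// \code
/// __kmpc_for_static_fini(&loc, gtid);
/// \endcode
///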
virtual void emitForStaticFinish(CodeGenFunction &CGF, SourceLocation Loc,
OpenMPDirectiveKind DKind);
/// Call __kmpc_dispatch_next(
/// ident_t *loc, kmp_int32 tid, kmp_int32 *p_lastiter,
/// kmp_int[32|64] *p_lower, kmp_int[32|64] *p_upper,
/// kmp_int[32|64] *p_stride);
/// \param IVSize Size of the iteration variable in bits.
/// \param IVSigned Sign of the iteration variable.
/// \param IL Address of the output variable in which the flag of the
/// last iteration is returned.
/// \param LB Address of the output variable in which the lower iteration
/// number is returned.
/// \param UB Address of the output variable in which the upper iteration
/// number is returned.
/// \param ST Address of the output variable in which the stride value is
/// returned.
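///
/// For illustration, a sketch of the dispatch loop built around this call
/// for a 32-bit signed induction variable (assuming the returned chunk
/// bounds are walked with unit step):
/// \code
/// while (__kmpc_dispatch_next_4(&loc, gtid, &last, &lb, &ub, &st)) {
///   for (i = lb; i <= ub; ++i)
///     body(i);
/// }
/// \endcode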
virtual llvm::Value *emitForNext(CodeGenFunction &CGF, SourceLocation Loc,
unsigned IVSize, bool IVSigned,
Address IL, Address LB,
Address UB, Address ST);
/// Emits call to void __kmpc_push_num_threads(ident_t *loc, kmp_int32
/// global_tid, kmp_int32 num_threads) to generate code for 'num_threads'
/// clause.
/// \param NumThreads An integer value of threads.
virtual void emitNumThreadsClause(CodeGenFunction &CGF,
llvm::Value *NumThreads,
SourceLocation Loc);
/// Emit call to void __kmpc_push_proc_bind(ident_t *loc, kmp_int32
/// global_tid, int proc_bind) to generate code for 'proc_bind' clause.
virtual void emitProcBindClause(CodeGenFunction &CGF,
llvm::omp::ProcBindKind ProcBind,
SourceLocation Loc);
/// Returns address of the threadprivate variable for the current
/// thread.
/// \param VD Threadprivate variable.
/// \param VDAddr Address of the global variable \a VD.
/// \param Loc Location of the reference to threadprivate var.
/// \return Address of the threadprivate variable for the current thread.
virtual Address getAddrOfThreadPrivate(CodeGenFunction &CGF,
const VarDecl *VD,
Address VDAddr,
SourceLocation Loc);
/// Returns the address of the variable marked as declare target with link
/// clause OR as declare target with to clause and unified memory.
virtual Address getAddrOfDeclareTargetVar(const VarDecl *VD);
/// Emit code for the initialization of a threadprivate variable. It emits
/// a call to the runtime library which adds the initial value to the newly
/// created threadprivate variable (if it is not constant) and registers a
/// destructor for the variable (if any).
/// \param VD Threadprivate variable.
/// \param VDAddr Address of the global variable \a VD.
/// \param Loc Location of threadprivate declaration.
/// \param PerformInit true if initialization expression is not constant.
virtual llvm::Function *
emitThreadPrivateVarDefinition(const VarDecl *VD, Address VDAddr,
SourceLocation Loc, bool PerformInit,
CodeGenFunction *CGF = nullptr);
/// Emit code for the initialization of a declare target variable.
/// \param VD Declare target variable.
/// \param Addr Address of the global variable \a VD.
/// \param PerformInit true if initialization expression is not constant.
virtual bool emitDeclareTargetVarDefinition(const VarDecl *VD,
llvm::GlobalVariable *Addr,
bool PerformInit);
/// Creates artificial threadprivate variable with name \p Name and type \p
/// VarType.
/// \param VarType Type of the artificial threadprivate variable.
/// \param Name Name of the artificial threadprivate variable.
virtual Address getAddrOfArtificialThreadPrivate(CodeGenFunction &CGF,
QualType VarType,
StringRef Name);
/// Emit flush of the variables specified in 'omp flush' directive.
/// \param Vars List of variables to flush.
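///
/// For illustration, a sketch of the emitted call (depending on the \p AO
/// ordering an LLVM fence instruction may be emitted instead; the variable
/// list is not passed to the runtime):
/// \code
/// __kmpc_flush(&loc);
/// \endcode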
virtual void emitFlush(CodeGenFunction &CGF, ArrayRef<const Expr *> Vars,
SourceLocation Loc, llvm::AtomicOrdering AO);
/// Emit task region for the task directive. The task region is
/// emitted in several steps:
/// 1. Emit a call to kmp_task_t *__kmpc_omp_task_alloc(ident_t *, kmp_int32
/// gtid, kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds,
/// kmp_routine_entry_t *task_entry). Here task_entry is a pointer to the
/// function:
/// kmp_int32 .omp_task_entry.(kmp_int32 gtid, kmp_task_t *tt) {
/// TaskFunction(gtid, tt->part_id, tt->shareds);
/// return 0;
/// }
/// 2. Copy a list of shared variables to field shareds of the resulting
/// structure kmp_task_t returned by the previous call (if any).
/// 3. Copy a pointer to the destructors function to the destructors field of
/// the resulting structure kmp_task_t.
/// 4. Emit a call to kmp_int32 __kmpc_omp_task(ident_t *, kmp_int32 gtid,
/// kmp_task_t *new_task), where new_task is a resulting structure from
/// previous items.
/// \param D Current task directive.
/// \param TaskFunction An LLVM function with type void (*)(i32 /*gtid*/, i32
/// /*part_id*/, captured_struct */*__context*/);
/// \param SharedsTy A type which contains references to the shared variables.
/// \param Shareds Context with the list of shared variables from the \p
/// TaskFunction.
/// \param IfCond Not a nullptr if the 'if' clause was specified, nullptr
/// otherwise.
/// \param Data Additional data for task generation like tiedness, final
/// state, list of privates etc.
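///
/// For illustration, a condensed sketch of the sequence described in steps
/// 1-4 above (omitting the 'if' clause handling):
/// \code
/// kmp_task_t *new_task =
///     __kmpc_omp_task_alloc(&loc, gtid, flags, sizeof_kmp_task_t,
///                           sizeof_shareds, &.omp_task_entry.);
/// /* copy shareds and the destructors pointer into *new_task */
/// __kmpc_omp_task(&loc, gtid, new_task);
/// \endcode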
virtual void emitTaskCall(CodeGenFunction &CGF, SourceLocation Loc,
const OMPExecutableDirective &D,
llvm::Function *TaskFunction, QualType SharedsTy,
Address Shareds, const Expr *IfCond,
const OMPTaskDataTy &Data);
/// Emit task region for the taskloop directive. The taskloop region is
/// emitted in several steps:
/// 1. Emit a call to kmp_task_t *__kmpc_omp_task_alloc(ident_t *, kmp_int32
/// gtid, kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds,
/// kmp_routine_entry_t *task_entry). Here task_entry is a pointer to the
/// function:
/// kmp_int32 .omp_task_entry.(kmp_int32 gtid, kmp_task_t *tt) {
/// TaskFunction(gtid, tt->part_id, tt->shareds);
/// return 0;
/// }
/// 2. Copy a list of shared variables to field shareds of the resulting
/// structure kmp_task_t returned by the previous call (if any).
/// 3. Copy a pointer to the destructors function to the destructors field of
/// the resulting structure kmp_task_t.
/// 4. Emit a call to void __kmpc_taskloop(ident_t *loc, int gtid, kmp_task_t
/// *task, int if_val, kmp_uint64 *lb, kmp_uint64 *ub, kmp_int64 st, int
/// nogroup, int sched, kmp_uint64 grainsize, void *task_dup), where new_task
/// is the structure resulting from the previous items.
/// \param D Current task directive.
/// \param TaskFunction An LLVM function with type void (*)(i32 /*gtid*/, i32
/// /*part_id*/, captured_struct */*__context*/);
/// \param SharedsTy A type which contains references to the shared variables.
/// \param Shareds Context with the list of shared variables from the \p
/// TaskFunction.
/// \param IfCond Not a nullptr if the 'if' clause was specified, nullptr
/// otherwise.
/// \param Data Additional data for task generation like tiedness, final
/// state, list of privates etc.
virtual void emitTaskLoopCall(CodeGenFunction &CGF, SourceLocation Loc,
const OMPLoopDirective &D,
llvm::Function *TaskFunction,
QualType SharedsTy, Address Shareds,
const Expr *IfCond, const OMPTaskDataTy &Data);
/// Emit code for the directive that does not require outlining.
///
/// \param InnermostKind Kind of innermost directive (for simple directives it
/// is a directive itself, for combined - its innermost directive).
/// \param CodeGen Code generation sequence for the \a D directive.
/// \param HasCancel true if region has inner cancel directive, false
/// otherwise.
virtual void emitInlinedDirective(CodeGenFunction &CGF,
OpenMPDirectiveKind InnermostKind,
const RegionCodeGenTy &CodeGen,
bool HasCancel = false);
/// Emits reduction function.
/// \param ArgsType Array type containing pointers to reduction variables.
/// \param Privates List of private copies for original reduction arguments.
/// \param LHSExprs List of LHS in \a ReductionOps reduction operations.
/// \param RHSExprs List of RHS in \a ReductionOps reduction operations.
/// \param ReductionOps List of reduction operations in form 'LHS binop RHS'
/// or 'operator binop(LHS, RHS)'.
llvm::Function *emitReductionFunction(SourceLocation Loc,
llvm::Type *ArgsType,
ArrayRef<const Expr *> Privates,
ArrayRef<const Expr *> LHSExprs,
ArrayRef<const Expr *> RHSExprs,
ArrayRef<const Expr *> ReductionOps);
/// Emits a single reduction combiner.
void emitSingleReductionCombiner(CodeGenFunction &CGF,
const Expr *ReductionOp,
const Expr *PrivateRef,
const DeclRefExpr *LHS,
const DeclRefExpr *RHS);
struct ReductionOptionsTy {
bool WithNowait;
bool SimpleReduction;
OpenMPDirectiveKind ReductionKind;
};
/// Emit code for the reduction clause. The following code should be emitted
/// for the reduction:
/// \code
///
/// static kmp_critical_name lock = { 0 };
///
/// void reduce_func(void *lhs[<n>], void *rhs[<n>]) {
/// ...
/// *(Type<i>*)lhs[i] = RedOp<i>(*(Type<i>*)lhs[i], *(Type<i>*)rhs[i]);
/// ...
/// }
///
/// ...
/// void *RedList[<n>] = {&<RHSExprs>[0], ..., &<RHSExprs>[<n>-1]};
/// switch (__kmpc_reduce{_nowait}(<loc>, <gtid>, <n>, sizeof(RedList),
/// RedList, reduce_func, &<lock>)) {
/// case 1:
/// ...
/// <LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i]);
/// ...
/// __kmpc_end_reduce{_nowait}(<loc>, <gtid>, &<lock>);
/// break;
/// case 2:
/// ...
/// Atomic(<LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i]));
/// ...
/// break;
/// default:;
/// }
/// \endcode
///
/// \param Privates List of private copies for original reduction arguments.
/// \param LHSExprs List of LHS in \a ReductionOps reduction operations.
/// \param RHSExprs List of RHS in \a ReductionOps reduction operations.
/// \param ReductionOps List of reduction operations in form 'LHS binop RHS'
/// or 'operator binop(LHS, RHS)'.
/// \param Options List of options for reduction codegen:
/// WithNowait true if parent directive has also nowait clause, false
/// otherwise.
/// SimpleReduction Emit reduction operation only. Used for omp simd
/// directive on the host.
/// ReductionKind The kind of reduction to perform.
virtual void emitReduction(CodeGenFunction &CGF, SourceLocation Loc,
ArrayRef<const Expr *> Privates,
ArrayRef<const Expr *> LHSExprs,
ArrayRef<const Expr *> RHSExprs,
ArrayRef<const Expr *> ReductionOps,
ReductionOptionsTy Options);
/// Emit code for the initialization of the task reduction clause. The
/// following code should be emitted for the reduction:
/// \code
///
/// _task_red_item_t red_data[n];
/// ...
/// red_data[i].shar = &origs[i];
/// red_data[i].size = sizeof(origs[i]);
/// red_data[i].f_init = (void*)RedInit<i>;
/// red_data[i].f_fini = (void*)RedDest<i>;
/// red_data[i].f_comb = (void*)RedOp<i>;
/// red_data[i].flags = <Flag_i>;
/// ...
/// void* tg1 = __kmpc_task_reduction_init(gtid, n, red_data);
/// \endcode
///
/// \param LHSExprs List of LHS in \a Data.ReductionOps reduction operations.
/// \param RHSExprs List of RHS in \a Data.ReductionOps reduction operations.
/// \param Data Additional data for task generation like tiedness, final
/// state, list of privates, reductions etc.
virtual llvm::Value *emitTaskReductionInit(CodeGenFunction &CGF,
SourceLocation Loc,
ArrayRef<const Expr *> LHSExprs,
ArrayRef<const Expr *> RHSExprs,
const OMPTaskDataTy &Data);
/// Required to resolve existing problems in the runtime. Emits threadprivate
/// variables to store the size of the VLAs/array sections for
/// initializer/combiner/finalizer functions, and emits a threadprivate
/// variable to store the pointer to the original reduction item for the
/// custom initializer defined by the declare reduction construct.
/// \param RCG Allows reuse of existing data for the reductions.
/// \param N Reduction item for which fixups must be emitted.
virtual void emitTaskReductionFixups(CodeGenFunction &CGF, SourceLocation Loc,
ReductionCodeGen &RCG, unsigned N);
/// Get the address of the `void *` type private copy of the reduction
/// item specified by the \p SharedLVal.
/// \param ReductionsPtr Pointer to the reduction data returned by the
/// emitTaskReductionInit function.
/// \param SharedLVal Address of the original reduction item.
virtual Address getTaskReductionItem(CodeGenFunction &CGF, SourceLocation Loc,
llvm::Value *ReductionsPtr,
LValue SharedLVal);
/// Emit code for 'taskwait' directive.
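/// For illustration, a sketch of the emitted runtime call:
/// \code
/// __kmpc_omp_taskwait(&loc, gtid);
/// \endcode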
virtual void emitTaskwaitCall(CodeGenFunction &CGF, SourceLocation Loc);
/// Emit code for 'cancellation point' construct.
/// \param CancelRegion Region kind for which the cancellation point must be
/// emitted.
///
virtual void emitCancellationPointCall(CodeGenFunction &CGF,
SourceLocation Loc,
OpenMPDirectiveKind CancelRegion);
/// Emit code for 'cancel' construct.
/// \param IfCond Condition in the associated 'if' clause, if it was
/// specified, nullptr otherwise.
/// \param CancelRegion Region kind for which the cancel must be emitted.
///
virtual void emitCancelCall(CodeGenFunction &CGF, SourceLocation Loc,
const Expr *IfCond,
OpenMPDirectiveKind CancelRegion);
/// Emit the outlined function for the 'target' directive.
/// \param D Directive to emit.
/// \param ParentName Name of the function that encloses the target region.
/// \param OutlinedFn Outlined function value to be defined by this call.
/// \param OutlinedFnID Outlined function ID value to be defined by this call.
/// \param IsOffloadEntry True if the outlined function is an offload entry.
/// \param CodeGen Code generation sequence for the \a D directive.
/// An outlined function may not be an entry if, e.g. the if clause always
/// evaluates to false.
virtual void emitTargetOutlinedFunction(const OMPExecutableDirective &D,
StringRef ParentName,
llvm::Function *&OutlinedFn,
llvm::Constant *&OutlinedFnID,
bool IsOffloadEntry,
const RegionCodeGenTy &CodeGen);
/// Emit the target offloading code associated with \a D. The emitted
/// code attempts to offload the execution to the device; in the event of
/// a failure it executes the host version outlined in \a OutlinedFn.
/// \param D Directive to emit.
/// \param OutlinedFn Host version of the code to be offloaded.
/// \param OutlinedFnID ID of host version of the code to be offloaded.
/// \param IfCond Expression evaluated in if clause associated with the target
/// directive, or null if no if clause is used.
/// \param Device Expression evaluated in the device clause associated with
/// the target directive (or null if no device clause is used), and the
/// device modifier.
/// \param SizeEmitter Callback to emit number of iterations for loop-based
/// directives.
virtual void emitTargetCall(
CodeGenFunction &CGF, const OMPExecutableDirective &D,
llvm::Function *OutlinedFn, llvm::Value *OutlinedFnID, const Expr *IfCond,
llvm::PointerIntPair<const Expr *, 2, OpenMPDeviceClauseModifier> Device,
llvm::function_ref<llvm::Value *(CodeGenFunction &CGF,
const OMPLoopDirective &D)>
SizeEmitter);
/// Emit the target regions enclosed in \a GD function definition or
/// the function itself in case it is a valid device function. Returns true if
/// \a GD was dealt with successfully.
/// \param GD Function to scan.
virtual bool emitTargetFunctions(GlobalDecl GD);
/// Emit the global variable if it is a valid device global variable.
/// Returns true if \a GD was dealt with successfully.
/// \param GD Variable declaration to emit.
virtual bool emitTargetGlobalVariable(GlobalDecl GD);
/// Checks if the provided variable decl \a VD is a declare target variable
/// and registers it when emitting code for the host.
virtual void registerTargetGlobalVariable(const VarDecl *VD,
llvm::Constant *Addr);
/// Registers provided target firstprivate variable as global on the
/// target.
llvm::Constant *registerTargetFirstprivateCopy(CodeGenFunction &CGF,
const VarDecl *VD);
/// Emit the global \a GD if it is meaningful for the target. Returns true
/// if it was emitted successfully.
/// \param GD Global to scan.
virtual bool emitTargetGlobal(GlobalDecl GD);
/// Creates and returns a registration function for when at least one
/// requires directive was used in the current module.
llvm::Function *emitRequiresDirectiveRegFun();
/// Creates all the offload entries in the current compilation unit
/// along with the associated metadata.
void createOffloadEntriesAndInfoMetadata();
/// Emits code for a teams call of the \a OutlinedFn with
/// variables captured in a record whose address is stored in \a
/// CapturedStruct.
/// \param OutlinedFn Outlined function to be run by team masters. Type of
/// this function is void(*)(kmp_int32 *, kmp_int32, struct context_vars*).
/// \param CapturedVars A pointer to the record with the references to
/// variables used in \a OutlinedFn function.
///
virtual void emitTeamsCall(CodeGenFunction &CGF,
const OMPExecutableDirective &D,
SourceLocation Loc, llvm::Function *OutlinedFn,
ArrayRef<llvm::Value *> CapturedVars);
/// Emits call to void __kmpc_push_num_teams(ident_t *loc, kmp_int32
/// global_tid, kmp_int32 num_teams, kmp_int32 thread_limit) to generate code
/// for num_teams clause.
/// \param NumTeams An integer expression of teams.
/// \param ThreadLimit An integer expression of threads.
virtual void emitNumTeamsClause(CodeGenFunction &CGF, const Expr *NumTeams,
const Expr *ThreadLimit, SourceLocation Loc);
/// Struct that keeps all the relevant information that should be kept
/// throughout a 'target data' region.
class TargetDataInfo {
/// Set to true if device pointer information has to be obtained.
bool RequiresDevicePointerInfo = false;
public:
/// The array of base pointer passed to the runtime library.
llvm::Value *BasePointersArray = nullptr;
/// The array of section pointers passed to the runtime library.
llvm::Value *PointersArray = nullptr;
/// The array of sizes passed to the runtime library.
llvm::Value *SizesArray = nullptr;
/// The array of map types passed to the runtime library.
llvm::Value *MapTypesArray = nullptr;
/// The total number of pointers passed to the runtime library.
unsigned NumberOfPtrs = 0u;
/// Map between a declaration of a capture and the corresponding base
/// pointer address where the runtime returns the device pointers.
llvm::DenseMap<const ValueDecl *, Address> CaptureDeviceAddrMap;
explicit TargetDataInfo() {}
explicit TargetDataInfo(bool RequiresDevicePointerInfo)
: RequiresDevicePointerInfo(RequiresDevicePointerInfo) {}
/// Clear information about the data arrays.
void clearArrayInfo() {
BasePointersArray = nullptr;
PointersArray = nullptr;
SizesArray = nullptr;
MapTypesArray = nullptr;
NumberOfPtrs = 0u;
}
/// Return true if the current target data information has valid arrays.
bool isValid() {
return BasePointersArray && PointersArray && SizesArray &&
MapTypesArray && NumberOfPtrs;
}
bool requiresDevicePointerInfo() { return RequiresDevicePointerInfo; }
};
/// Emit the target data mapping code associated with \a D.
/// \param D Directive to emit.
/// \param IfCond Expression evaluated in the if clause associated with the
/// target directive, or null if no if clause is used.
/// \param Device Expression evaluated in device clause associated with the
/// target directive, or null if no device clause is used.
/// \param Info A record used to store information that needs to be preserved
/// until the region is closed.
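///
/// For illustration, a sketch of the bracketing runtime calls emitted around
/// the region (argument arrays come from \a Info; a sketch, not the exact
/// libomptarget signatures):
/// \code
/// __tgt_target_data_begin(device_id, num_ptrs, base_ptrs, ptrs,
///                         sizes, map_types);
/// <target-data-region-body>;
/// __tgt_target_data_end(device_id, num_ptrs, base_ptrs, ptrs,
///                       sizes, map_types);
/// \endcode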
virtual void emitTargetDataCalls(CodeGenFunction &CGF,
const OMPExecutableDirective &D,
const Expr *IfCond, const Expr *Device,
const RegionCodeGenTy &CodeGen,
TargetDataInfo &Info);
/// Emit the data mapping/movement code associated with the directive
/// \a D that should be of the form 'target [{enter|exit} data | update]'.
/// \param D Directive to emit.
/// \param IfCond Expression evaluated in if clause associated with the target
/// directive, or null if no if clause is used.
/// \param Device Expression evaluated in device clause associated with the
/// target directive, or null if no device clause is used.
virtual void emitTargetDataStandAloneCall(CodeGenFunction &CGF,
const OMPExecutableDirective &D,
const Expr *IfCond,
const Expr *Device);
/// Marks function \a Fn with properly mangled versions of vector functions.
/// \param FD Function marked as 'declare simd'.
/// \param Fn LLVM function that must be marked with 'declare simd'
/// attributes.
virtual void emitDeclareSimdFunction(const FunctionDecl *FD,
llvm::Function *Fn);
/// Emit initialization for doacross loop nesting support.
/// \param D Loop-based construct used in doacross nesting construct.
virtual void emitDoacrossInit(CodeGenFunction &CGF, const OMPLoopDirective &D,
ArrayRef<Expr *> NumIterations);
/// Emit code for doacross ordered directive with 'depend' clause.
/// \param C 'depend' clause with 'sink|source' dependency kind.
virtual void emitDoacrossOrdered(CodeGenFunction &CGF,
const OMPDependClause *C);
/// Translates the native parameter of the outlined function if this is
/// required for the target.
/// \param FD Field decl from captured record for the parameter.
/// \param NativeParam Parameter itself.
virtual const VarDecl *translateParameter(const FieldDecl *FD,
const VarDecl *NativeParam) const {
return NativeParam;
}
/// Gets the address of the native argument based on the address of the
/// target-specific parameter.
/// \param NativeParam Parameter itself.
/// \param TargetParam Corresponding target-specific parameter.
virtual Address getParameterAddress(CodeGenFunction &CGF,
const VarDecl *NativeParam,
const VarDecl *TargetParam) const;
/// Choose default schedule type and chunk value for the
/// dist_schedule clause.
virtual void getDefaultDistScheduleAndChunk(CodeGenFunction &CGF,
const OMPLoopDirective &S, OpenMPDistScheduleClauseKind &ScheduleKind,
llvm::Value *&Chunk) const {}
/// Choose default schedule type and chunk value for the
/// schedule clause.
virtual void getDefaultScheduleAndChunk(CodeGenFunction &CGF,
const OMPLoopDirective &S, OpenMPScheduleClauseKind &ScheduleKind,
const Expr *&ChunkExpr) const;
/// Emits call of the outlined function with the provided arguments,
/// translating these arguments to correct target-specific arguments.
virtual void
emitOutlinedFunctionCall(CodeGenFunction &CGF, SourceLocation Loc,
llvm::FunctionCallee OutlinedFn,
ArrayRef<llvm::Value *> Args = llvm::None) const;
/// Emits OpenMP-specific function prolog.
/// Required for device constructs.
virtual void emitFunctionProlog(CodeGenFunction &CGF, const Decl *D);
/// Gets the OpenMP-specific address of the local variable.
virtual Address getAddressOfLocalVariable(CodeGenFunction &CGF,
const VarDecl *VD);
/// Marks the declaration as already emitted for the device code and returns
/// true if it was already marked, false otherwise.
bool markAsGlobalTarget(GlobalDecl GD);
/// Emit declare target variables marked for deferred emission.
/// Adjust some parameters for the target-based directives, like addresses of
/// the variables captured by reference in lambdas.
virtual void
adjustTargetSpecificDataForLambdas(CodeGenFunction &CGF,
const OMPExecutableDirective &D) const;
/// Perform a check on the requires decl to ensure that the target
/// architecture supports unified addressing.
/// Gets default memory ordering as specified in requires directive.
llvm::AtomicOrdering getDefaultMemoryOrdering() const;
/// Checks if the variable has associated OMPAllocateDeclAttr attribute with
/// the predefined allocator and translates it into the corresponding address
/// space.
virtual bool hasAllocateAttributeForGlobalVar(const VarDecl *VD, LangAS &AS);
/// Return whether the unified_shared_memory has been specified.
bool hasRequiresUnifiedSharedMemory() const;
/// Emits the definition of the declare variant function.
virtual bool emitDeclareVariant(GlobalDecl GD, bool IsForDefinition);
/// Checks if the \p VD variable is marked as nontemporal declaration in
/// current context.
bool isNontemporalDecl(const ValueDecl *VD) const;
/// Create specialized alloca to handle lastprivate conditionals.
Address emitLastprivateConditionalInit(CodeGenFunction &CGF,
const VarDecl *VD);
/// Checks if the provided \p LVal is lastprivate conditional and emits the
/// code to update the value of the original variable.
/// \code
/// lastprivate(conditional: a)
/// ...
/// <type> a;
/// lp_a = ...;
/// #pragma omp critical(a)
/// if (last_iv_a <= iv) {
/// last_iv_a = iv;
/// global_a = lp_a;
/// }
/// \endcode
virtual void checkAndEmitLastprivateConditional(CodeGenFunction &CGF,
const Expr *LHS);
/// Checks if the lastprivate conditional was updated in an inner region and
/// writes the value.
/// \code
/// lastprivate(conditional: a)
/// ...
/// <type> a; bool Fired = false;
/// #pragma omp ... shared(a)
/// {
/// lp_a = ...;
/// Fired = true;
/// }
/// if (Fired) {
/// #pragma omp critical(a)
/// if (last_iv_a <= iv) {
/// last_iv_a = iv;
/// global_a = lp_a;
/// }
/// Fired = false;
/// }
/// \endcode
virtual void checkAndEmitSharedLastprivateConditional(
CodeGenFunction &CGF, const OMPExecutableDirective &D,
const llvm::DenseSet<CanonicalDeclPtr<const VarDecl>> &IgnoredDecls);
/// Gets the address of the global copy used for lastprivate conditional
/// update, if any.
/// \param PrivLVal LValue for the private copy.
/// \param VD Original lastprivate declaration.
virtual void emitLastprivateConditionalFinalUpdate(CodeGenFunction &CGF,
LValue PrivLVal,
const VarDecl *VD,
SourceLocation Loc);
/// Emits a list of dependencies based on the provided data (array of
/// dependence/expression pairs).
/// \param ForDepobj true if the memory for the dependencies is allocated for
/// the depobj directive. In this case, the variable is allocated dynamically.
/// \returns Pointer to the first element of the array cast to VoidPtr type.
std::pair<llvm::Value *, Address> emitDependClause(
CodeGenFunction &CGF,
ArrayRef<std::pair<OpenMPDependClauseKind, const Expr *>> Dependencies,
bool ForDepobj, SourceLocation Loc);
/// Emits the code to destroy the dependency object provided in depobj
/// directive.
void emitDestroyClause(CodeGenFunction &CGF, LValue DepobjLVal,
SourceLocation Loc);
/// Updates the dependency kind in the specified depobj object.
/// \param DepobjLVal LValue for the main depobj object.
/// \param NewDepKind New dependency kind.
void emitUpdateClause(CodeGenFunction &CGF, LValue DepobjLVal,
OpenMPDependClauseKind NewDepKind, SourceLocation Loc);
};
/// Class that supports emission of SIMD-only code.
class CGOpenMPSIMDRuntime final : public CGOpenMPRuntime {
public:
explicit CGOpenMPSIMDRuntime(CodeGenModule &CGM) : CGOpenMPRuntime(CGM) {}
~CGOpenMPSIMDRuntime() override {}
/// Emits outlined function for the specified OpenMP parallel directive
/// \a D. This outlined function has type void(*)(kmp_int32 *ThreadID,
/// kmp_int32 BoundID, struct context_vars*).
/// \param D OpenMP directive.
/// \param ThreadIDVar Variable for thread id in the current OpenMP region.
/// \param InnermostKind Kind of innermost directive (for simple directives it
/// is a directive itself, for combined - its innermost directive).
/// \param CodeGen Code generation sequence for the \a D directive.
llvm::Function *
emitParallelOutlinedFunction(const OMPExecutableDirective &D,
const VarDecl *ThreadIDVar,
OpenMPDirectiveKind InnermostKind,
const RegionCodeGenTy &CodeGen) override;
/// Emits outlined function for the specified OpenMP teams directive
/// \a D. This outlined function has type void(*)(kmp_int32 *ThreadID,
/// kmp_int32 BoundID, struct context_vars*).
/// \param D OpenMP directive.
/// \param ThreadIDVar Variable for thread id in the current OpenMP region.
/// \param InnermostKind Kind of innermost directive (for simple directives it
/// is a directive itself, for combined - its innermost directive).
/// \param CodeGen Code generation sequence for the \a D directive.
llvm::Function *
emitTeamsOutlinedFunction(const OMPExecutableDirective &D,
const VarDecl *ThreadIDVar,
OpenMPDirectiveKind InnermostKind,
const RegionCodeGenTy &CodeGen) override;
/// Emits outlined function for the OpenMP task directive \a D. This
/// outlined function has type void(*)(kmp_int32 ThreadID, struct task_t*
/// TaskT).
/// \param D OpenMP directive.
/// \param ThreadIDVar Variable for thread id in the current OpenMP region.
/// \param PartIDVar Variable for partition id in the current OpenMP untied
/// task region.
/// \param TaskTVar Variable for task_t argument.
/// \param InnermostKind Kind of innermost directive (for simple directives it
/// is a directive itself, for combined - its innermost directive).
/// \param CodeGen Code generation sequence for the \a D directive.
/// \param Tied true if task is generated for tied task, false otherwise.
/// \param NumberOfParts Number of parts in untied task. Ignored for tied
/// tasks.
///
llvm::Function *emitTaskOutlinedFunction(
const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
const VarDecl *PartIDVar, const VarDecl *TaskTVar,
OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen,
bool Tied, unsigned &NumberOfParts) override;
/// Emits code for a parallel or serial call of the \a OutlinedFn with
/// variables captured in a record whose address is stored in \a
/// CapturedStruct.
/// \param OutlinedFn Outlined function to be run in parallel threads. Type of
/// this function is void(*)(kmp_int32 *, kmp_int32, struct context_vars*).
/// \param CapturedVars A pointer to the record with the references to
/// variables used in \a OutlinedFn function.
/// \param IfCond Condition in the associated 'if' clause, if it was
/// specified, nullptr otherwise.
///
void emitParallelCall(CodeGenFunction &CGF, SourceLocation Loc,
llvm::Function *OutlinedFn,
ArrayRef<llvm::Value *> CapturedVars,
const Expr *IfCond) override;
/// Emits a critical region.
/// \param CriticalName Name of the critical region.
/// \param CriticalOpGen Generator for the statement associated with the given
/// critical region.
/// \param Hint Value of the 'hint' clause (optional).
void emitCriticalRegion(CodeGenFunction &CGF, StringRef CriticalName,
const RegionCodeGenTy &CriticalOpGen,
SourceLocation Loc,
const Expr *Hint = nullptr) override;
/// Emits a master region.
/// \param MasterOpGen Generator for the statement associated with the given
/// master region.
void emitMasterRegion(CodeGenFunction &CGF,
const RegionCodeGenTy &MasterOpGen,
SourceLocation Loc) override;
/// Emits code for a taskyield directive.
void emitTaskyieldCall(CodeGenFunction &CGF, SourceLocation Loc) override;
/// Emit a taskgroup region.
/// \param TaskgroupOpGen Generator for the statement associated with the
/// given taskgroup region.
void emitTaskgroupRegion(CodeGenFunction &CGF,
const RegionCodeGenTy &TaskgroupOpGen,
SourceLocation Loc) override;
/// Emits a single region.
/// \param SingleOpGen Generator for the statement associated with the given
/// single region.
void emitSingleRegion(CodeGenFunction &CGF,
const RegionCodeGenTy &SingleOpGen, SourceLocation Loc,
ArrayRef<const Expr *> CopyprivateVars,
ArrayRef<const Expr *> DestExprs,
ArrayRef<const Expr *> SrcExprs,
ArrayRef<const Expr *> AssignmentOps) override;
/// Emit an ordered region.
/// \param OrderedOpGen Generator for the statement associated with the given
/// ordered region.
void emitOrderedRegion(CodeGenFunction &CGF,
const RegionCodeGenTy &OrderedOpGen,
SourceLocation Loc, bool IsThreads) override;
/// Emit an implicit/explicit barrier for OpenMP threads.
/// \param Kind Directive for which this implicit barrier call must be
/// generated. Must be OMPD_barrier for explicit barrier generation.
/// \param EmitChecks true if checks for cancellation barriers need to be
/// emitted.
/// \param ForceSimpleCall true if a simple barrier call must be emitted,
/// false if the runtime class decides which one to emit (simple or with
/// cancellation checks).
///
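/// \code
/// // Sketch of the runtime calls involved (illustrative only):
/// //   plain barrier:       __kmpc_barrier(&loc, gtid);
/// //   cancellation checks: if (__kmpc_cancel_barrier(&loc, gtid))
/// //                          goto cancel_exit; // region was cancelled
/// \endcode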
void emitBarrierCall(CodeGenFunction &CGF, SourceLocation Loc,
OpenMPDirectiveKind Kind, bool EmitChecks = true,
bool ForceSimpleCall = false) override;
/// This is used for non-static scheduled types and when the ordered
/// clause is present on the loop construct.
/// Depending on the loop schedule, it is necessary to call some runtime
/// routine before the start of the OpenMP loop to get the loop upper / lower
/// bounds \a LB and \a UB and stride \a ST.
///
/// \param CGF Reference to current CodeGenFunction.
/// \param Loc Clang source location.
/// \param ScheduleKind Schedule kind, specified by the 'schedule' clause.
/// \param IVSize Size of the iteration variable in bits.
/// \param IVSigned Sign of the iteration variable.
/// \param Ordered true if loop is ordered, false otherwise.
/// \param DispatchValues struct containing llvm values for lower bound, upper
/// bound, and chunk expression.
/// If the chunk expression is null, a chunk size of 1 is used.
///
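/// \code
/// // Illustrative lowering for a 32-bit signed IV (names abridged; the
/// // actual codegen differs in detail):
/// __kmpc_dispatch_init_4(&loc, gtid, schedule, lb, ub, st, chunk);
/// while (__kmpc_dispatch_next_4(&loc, gtid, &last, &lb, &ub, &st)) {
///   for (iv = lb; iv <= ub; iv += st)
///     <loop body>;
/// }
/// \endcode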
void emitForDispatchInit(CodeGenFunction &CGF, SourceLocation Loc,
const OpenMPScheduleTy &ScheduleKind,
unsigned IVSize, bool IVSigned, bool Ordered,
const DispatchRTInput &DispatchValues) override;
/// Call the appropriate runtime routine to initialize it before start
/// of loop.
///
/// This is used only in case of static schedule, when the user did not
/// specify an ordered clause on the loop construct.
/// Depending on the loop schedule, it is necessary to call some runtime
/// routine before the start of the OpenMP loop to get the loop upper / lower
/// bounds LB and UB and stride ST.
///
/// \param CGF Reference to current CodeGenFunction.
/// \param Loc Clang source location.
/// \param DKind Kind of the directive.
/// \param ScheduleKind Schedule kind, specified by the 'schedule' clause.
/// \param Values Input arguments for the construct.
///
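/// \code
/// // Illustrative lowering for a 32-bit signed IV (names abridged):
/// __kmpc_for_static_init_4(&loc, gtid, schedtype, &last, &lb, &ub, &st,
///                          incr, chunk);
/// for (iv = lb; iv <= ub; iv += incr)
///   <loop body>;
/// __kmpc_for_static_fini(&loc, gtid);
/// \endcode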
void emitForStaticInit(CodeGenFunction &CGF, SourceLocation Loc,
OpenMPDirectiveKind DKind,
const OpenMPScheduleTy &ScheduleKind,
const StaticRTInput &Values) override;
/// Call the appropriate runtime routine to initialize the bounds of a
/// statically scheduled distribute loop before the start of the loop.
///
/// \param CGF Reference to current CodeGenFunction.
/// \param Loc Clang source location.
/// \param SchedKind Schedule kind, specified by the 'dist_schedule' clause.
/// \param Values Input arguments for the construct.
///
void emitDistributeStaticInit(CodeGenFunction &CGF, SourceLocation Loc,
OpenMPDistScheduleClauseKind SchedKind,
const StaticRTInput &Values) override;
/// Call the appropriate runtime routine to notify that we finished
/// iteration of the ordered loop with the dynamic scheduling.
///
/// \param CGF Reference to current CodeGenFunction.
/// \param Loc Clang source location.
/// \param IVSize Size of the iteration variable in bits.
/// \param IVSigned Sign of the iteration variable.
///
void emitForOrderedIterationEnd(CodeGenFunction &CGF, SourceLocation Loc,
unsigned IVSize, bool IVSigned) override;
/// Call the appropriate runtime routine to notify that we finished
/// all the work with current loop.
///
/// \param CGF Reference to current CodeGenFunction.
/// \param Loc Clang source location.
/// \param DKind Kind of the directive for which the static finish is emitted.
///
void emitForStaticFinish(CodeGenFunction &CGF, SourceLocation Loc,
OpenMPDirectiveKind DKind) override;
/// Call __kmpc_dispatch_next(
/// ident_t *loc, kmp_int32 tid, kmp_int32 *p_lastiter,
/// kmp_int[32|64] *p_lower, kmp_int[32|64] *p_upper,
/// kmp_int[32|64] *p_stride);
/// \param IVSize Size of the iteration variable in bits.
/// \param IVSigned Sign of the iteration variable.
/// \param IL Address of the output variable in which the flag of the
/// last iteration is returned.
/// \param LB Address of the output variable in which the lower iteration
/// number is returned.
/// \param UB Address of the output variable in which the upper iteration
/// number is returned.
/// \param ST Address of the output variable in which the stride value is
/// returned.
llvm::Value *emitForNext(CodeGenFunction &CGF, SourceLocation Loc,
unsigned IVSize, bool IVSigned, Address IL,
Address LB, Address UB, Address ST) override;
/// Emits call to void __kmpc_push_num_threads(ident_t *loc, kmp_int32
/// global_tid, kmp_int32 num_threads) to generate code for 'num_threads'
/// clause.
/// \param NumThreads An integer value for the number of threads.
void emitNumThreadsClause(CodeGenFunction &CGF, llvm::Value *NumThreads,
SourceLocation Loc) override;
/// Emit call to void __kmpc_push_proc_bind(ident_t *loc, kmp_int32
/// global_tid, int proc_bind) to generate code for 'proc_bind' clause.
void emitProcBindClause(CodeGenFunction &CGF,
llvm::omp::ProcBindKind ProcBind,
SourceLocation Loc) override;
/// Returns address of the threadprivate variable for the current
/// thread.
/// \param VD Threadprivate variable.
/// \param VDAddr Address of the global variable \a VD.
/// \param Loc Location of the reference to threadprivate var.
/// \return Address of the threadprivate variable for the current thread.
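/// \code
/// // Roughly equivalent runtime call (illustrative; 'cache' is a
/// // compiler-generated per-variable void** slot):
/// void *addr = __kmpc_threadprivate_cached(&loc, gtid, (void *)&var,
///                                          sizeof(var), &cache);
/// \endcode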
Address getAddrOfThreadPrivate(CodeGenFunction &CGF, const VarDecl *VD,
Address VDAddr, SourceLocation Loc) override;
/// Emits code for the initialization of a threadprivate variable. It emits
/// a call to the runtime library which adds the initial value to the newly
/// created threadprivate variable (if it is not constant) and registers a
/// destructor for the variable (if any).
/// \param VD Threadprivate variable.
/// \param VDAddr Address of the global variable \a VD.
/// \param Loc Location of threadprivate declaration.
/// \param PerformInit true if initialization expression is not constant.
llvm::Function *
emitThreadPrivateVarDefinition(const VarDecl *VD, Address VDAddr,
SourceLocation Loc, bool PerformInit,
CodeGenFunction *CGF = nullptr) override;
/// Creates artificial threadprivate variable with name \p Name and type \p
/// VarType.
/// \param VarType Type of the artificial threadprivate variable.
/// \param Name Name of the artificial threadprivate variable.
Address getAddrOfArtificialThreadPrivate(CodeGenFunction &CGF,
QualType VarType,
StringRef Name) override;
/// Emit flush of the variables specified in 'omp flush' directive.
/// \param Vars List of variables to flush.
void emitFlush(CodeGenFunction &CGF, ArrayRef<const Expr *> Vars,
SourceLocation Loc, llvm::AtomicOrdering AO) override;
/// Emit task region for the task directive. The task region is
/// emitted in several steps:
/// 1. Emit a call to kmp_task_t *__kmpc_omp_task_alloc(ident_t *, kmp_int32
/// gtid, kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds,
/// kmp_routine_entry_t *task_entry). Here task_entry is a pointer to the
/// function:
/// kmp_int32 .omp_task_entry.(kmp_int32 gtid, kmp_task_t *tt) {
/// TaskFunction(gtid, tt->part_id, tt->shareds);
/// return 0;
/// }
/// 2. Copy a list of shared variables to the shareds field of the resulting
/// kmp_task_t structure returned by the previous call (if any).
/// 3. Copy a pointer to the destructors function to the destructions field
/// of the resulting kmp_task_t structure.
/// 4. Emit a call to kmp_int32 __kmpc_omp_task(ident_t *, kmp_int32 gtid,
/// kmp_task_t *new_task), where new_task is the structure resulting from the
/// previous items.
/// \param D Current task directive.
/// \param TaskFunction An LLVM function with type void (*)(i32 /*gtid*/, i32
/// /*part_id*/, captured_struct */*__context*/);
/// \param SharedsTy A type which contains references to the shared variables.
/// \param Shareds Context with the list of shared variables from the \p
/// TaskFunction.
/// \param IfCond Not nullptr if the 'if' clause was specified, nullptr
/// otherwise.
/// \param Data Additional data for task generation like tiedness, final
/// state, list of privates, etc.
void emitTaskCall(CodeGenFunction &CGF, SourceLocation Loc,
const OMPExecutableDirective &D,
llvm::Function *TaskFunction, QualType SharedsTy,
Address Shareds, const Expr *IfCond,
const OMPTaskDataTy &Data) override;
/// Emit task region for the taskloop directive. The taskloop region is
/// emitted in several steps:
/// 1. Emit a call to kmp_task_t *__kmpc_omp_task_alloc(ident_t *, kmp_int32
/// gtid, kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds,
/// kmp_routine_entry_t *task_entry). Here task_entry is a pointer to the
/// function:
/// kmp_int32 .omp_task_entry.(kmp_int32 gtid, kmp_task_t *tt) {
/// TaskFunction(gtid, tt->part_id, tt->shareds);
/// return 0;
/// }
/// 2. Copy a list of shared variables to the shareds field of the resulting
/// kmp_task_t structure returned by the previous call (if any).
/// 3. Copy a pointer to the destructors function to the destructions field
/// of the resulting kmp_task_t structure.
/// 4. Emit a call to void __kmpc_taskloop(ident_t *loc, int gtid, kmp_task_t
/// *task, int if_val, kmp_uint64 *lb, kmp_uint64 *ub, kmp_int64 st, int
/// nogroup, int sched, kmp_uint64 grainsize, void *task_dup), where task is
/// the structure resulting from the previous items.
/// \param D Current task directive.
/// \param TaskFunction An LLVM function with type void (*)(i32 /*gtid*/, i32
/// /*part_id*/, captured_struct */*__context*/);
/// \param SharedsTy A type which contains references to the shared variables.
/// \param Shareds Context with the list of shared variables from the \p
/// TaskFunction.
/// \param IfCond Not nullptr if the 'if' clause was specified, nullptr
/// otherwise.
/// \param Data Additional data for task generation like tiedness, final
/// state, list of privates, etc.
void emitTaskLoopCall(CodeGenFunction &CGF, SourceLocation Loc,
const OMPLoopDirective &D, llvm::Function *TaskFunction,
QualType SharedsTy, Address Shareds, const Expr *IfCond,
const OMPTaskDataTy &Data) override;
/// Emit code for the reduction clause. The following code should be
/// emitted for the reduction:
/// \code
///
/// static kmp_critical_name lock = { 0 };
///
/// void reduce_func(void *lhs[<n>], void *rhs[<n>]) {
/// ...
/// *(Type<i>*)lhs[i] = RedOp<i>(*(Type<i>*)lhs[i], *(Type<i>*)rhs[i]);
/// ...
/// }
///
/// ...
/// void *RedList[<n>] = {&<RHSExprs>[0], ..., &<RHSExprs>[<n>-1]};
/// switch (__kmpc_reduce{_nowait}(<loc>, <gtid>, <n>, sizeof(RedList),
/// RedList, reduce_func, &<lock>)) {
/// case 1:
/// ...
/// <LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i]);
/// ...
/// __kmpc_end_reduce{_nowait}(<loc>, <gtid>, &<lock>);
/// break;
/// case 2:
/// ...
/// Atomic(<LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i]));
/// ...
/// break;
/// default:;
/// }
/// \endcode
///
/// \param Privates List of private copies for original reduction arguments.
/// \param LHSExprs List of LHS in \a ReductionOps reduction operations.
/// \param RHSExprs List of RHS in \a ReductionOps reduction operations.
/// \param ReductionOps List of reduction operations in form 'LHS binop RHS'
/// or 'operator binop(LHS, RHS)'.
/// \param Options List of options for reduction codegen:
/// WithNowait true if the parent directive also has a nowait clause, false
/// otherwise.
/// SimpleReduction Emit reduction operation only. Used for omp simd
/// directive on the host.
/// ReductionKind The kind of reduction to perform.
void emitReduction(CodeGenFunction &CGF, SourceLocation Loc,
ArrayRef<const Expr *> Privates,
ArrayRef<const Expr *> LHSExprs,
ArrayRef<const Expr *> RHSExprs,
ArrayRef<const Expr *> ReductionOps,
ReductionOptionsTy Options) override;
/// Emit code for the initialization of the task reduction clause. The
/// following code should be emitted for the reduction:
/// \code
///
/// _task_red_item_t red_data[n];
/// ...
/// red_data[i].shar = &origs[i];
/// red_data[i].size = sizeof(origs[i]);
/// red_data[i].f_init = (void*)RedInit<i>;
/// red_data[i].f_fini = (void*)RedDest<i>;
/// red_data[i].f_comb = (void*)RedOp<i>;
/// red_data[i].flags = <Flag_i>;
/// ...
/// void* tg1 = __kmpc_task_reduction_init(gtid, n, red_data);
/// \endcode
///
/// \param LHSExprs List of LHS in \a Data.ReductionOps reduction operations.
/// \param RHSExprs List of RHS in \a Data.ReductionOps reduction operations.
/// \param Data Additional data for task generation like tiedness, final
/// state, list of privates, reductions etc.
llvm::Value *emitTaskReductionInit(CodeGenFunction &CGF, SourceLocation Loc,
ArrayRef<const Expr *> LHSExprs,
ArrayRef<const Expr *> RHSExprs,
const OMPTaskDataTy &Data) override;
/// Required to resolve existing problems in the runtime. Emits threadprivate
/// variables to store the size of the VLAs/array sections for
/// initializer/combiner/finalizer functions + emits threadprivate variable to
/// store the pointer to the original reduction item for the custom
/// initializer defined by declare reduction construct.
/// \param RCG Allows reusing existing data for the reductions.
/// \param N Reduction item for which fixups must be emitted.
void emitTaskReductionFixups(CodeGenFunction &CGF, SourceLocation Loc,
ReductionCodeGen &RCG, unsigned N) override;
/// Get the `void *` typed address of the private copy of the reduction
/// item specified by the \p SharedLVal.
/// \param ReductionsPtr Pointer to the reduction data returned by the
/// emitTaskReductionInit function.
/// \param SharedLVal Address of the original reduction item.
Address getTaskReductionItem(CodeGenFunction &CGF, SourceLocation Loc,
llvm::Value *ReductionsPtr,
LValue SharedLVal) override;
/// Emit code for 'taskwait' directive.
void emitTaskwaitCall(CodeGenFunction &CGF, SourceLocation Loc) override;
/// Emit code for 'cancellation point' construct.
/// \param CancelRegion Region kind for which the cancellation point must be
/// emitted.
///
void emitCancellationPointCall(CodeGenFunction &CGF, SourceLocation Loc,
OpenMPDirectiveKind CancelRegion) override;
/// Emit code for 'cancel' construct.
/// \param IfCond Condition in the associated 'if' clause, if it was
/// specified, nullptr otherwise.
/// \param CancelRegion Region kind for which the cancel must be emitted.
///
void emitCancelCall(CodeGenFunction &CGF, SourceLocation Loc,
const Expr *IfCond,
OpenMPDirectiveKind CancelRegion) override;
/// Emit outlined function for 'target' directive.
/// \param D Directive to emit.
/// \param ParentName Name of the function that encloses the target region.
/// \param OutlinedFn Outlined function value to be defined by this call.
/// \param OutlinedFnID Outlined function ID value to be defined by this call.
/// \param IsOffloadEntry True if the outlined function is an offload entry.
/// \param CodeGen Code generation sequence for the \a D directive.
/// An outlined function may not be an entry if, e.g. the if clause always
/// evaluates to false.
void emitTargetOutlinedFunction(const OMPExecutableDirective &D,
StringRef ParentName,
llvm::Function *&OutlinedFn,
llvm::Constant *&OutlinedFnID,
bool IsOffloadEntry,
const RegionCodeGenTy &CodeGen) override;
/// Emit the target offloading code associated with \a D. The emitted
/// code attempts to offload the execution to the device; in the event of
/// a failure it executes the host version outlined in \a OutlinedFn.
/// \param D Directive to emit.
/// \param OutlinedFn Host version of the code to be offloaded.
/// \param OutlinedFnID ID of host version of the code to be offloaded.
/// \param IfCond Expression evaluated in if clause associated with the target
/// directive, or null if no if clause is used.
/// \param Device Expression evaluated in the device clause associated with
/// the target directive together with its device modifier, or null if no
/// device clause is used.
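/// \code
/// // Illustrative structure of the emitted code (arguments abridged):
/// if (<IfCond>) {
///   int err = __tgt_target(device_id, host_ptr, arg_num, args_base, args,
///                          arg_sizes, arg_types);
///   if (err) OutlinedFn(...);   // offload failed: run the host fallback
/// } else {
///   OutlinedFn(...);            // if clause is false: run on the host
/// }
/// \endcode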
void emitTargetCall(
CodeGenFunction &CGF, const OMPExecutableDirective &D,
llvm::Function *OutlinedFn, llvm::Value *OutlinedFnID, const Expr *IfCond,
llvm::PointerIntPair<const Expr *, 2, OpenMPDeviceClauseModifier> Device,
llvm::function_ref<llvm::Value *(CodeGenFunction &CGF,
const OMPLoopDirective &D)>
SizeEmitter) override;
/// Emit the target regions enclosed in \a GD function definition or
/// the function itself in case it is a valid device function. Returns true if
/// \a GD was dealt with successfully.
/// \param GD Function to scan.
bool emitTargetFunctions(GlobalDecl GD) override;
/// Emit the global variable if it is a valid device global variable.
/// Returns true if \a GD was dealt with successfully.
/// \param GD Variable declaration to emit.
bool emitTargetGlobalVariable(GlobalDecl GD) override;
/// Emit the global \a GD if it is meaningful for the target. Returns
/// true if it was emitted successfully.
/// \param GD Global to scan.
bool emitTargetGlobal(GlobalDecl GD) override;
/// Emits code for a teams call of the \a OutlinedFn with
/// variables captured in a record whose address is stored in \a
/// CapturedStruct.
/// \param OutlinedFn Outlined function to be run by team masters. Type of
/// this function is void(*)(kmp_int32 *, kmp_int32, struct context_vars*).
/// \param CapturedVars A pointer to the record with the references to
/// variables used in \a OutlinedFn function.
///
void emitTeamsCall(CodeGenFunction &CGF, const OMPExecutableDirective &D,
SourceLocation Loc, llvm::Function *OutlinedFn,
ArrayRef<llvm::Value *> CapturedVars) override;
/// Emits call to void __kmpc_push_num_teams(ident_t *loc, kmp_int32
/// global_tid, kmp_int32 num_teams, kmp_int32 thread_limit) to generate code
/// for num_teams clause.
/// \param NumTeams An integer expression for the number of teams.
/// \param ThreadLimit An integer expression for the thread limit.
void emitNumTeamsClause(CodeGenFunction &CGF, const Expr *NumTeams,
const Expr *ThreadLimit, SourceLocation Loc) override;
/// Emit the target data mapping code associated with \a D.
/// \param D Directive to emit.
/// \param IfCond Expression evaluated in if clause associated with the
/// target directive, or null if no if clause is used.
/// \param Device Expression evaluated in device clause associated with the
/// target directive, or null if no device clause is used.
/// \param Info A record used to store information that needs to be preserved
/// until the region is closed.
void emitTargetDataCalls(CodeGenFunction &CGF,
const OMPExecutableDirective &D, const Expr *IfCond,
const Expr *Device, const RegionCodeGenTy &CodeGen,
TargetDataInfo &Info) override;
/// Emit the data mapping/movement code associated with the directive
/// \a D that should be of the form 'target [{enter|exit} data | update]'.
/// \param D Directive to emit.
/// \param IfCond Expression evaluated in if clause associated with the target
/// directive, or null if no if clause is used.
/// \param Device Expression evaluated in device clause associated with the
/// target directive, or null if no device clause is used.
void emitTargetDataStandAloneCall(CodeGenFunction &CGF,
const OMPExecutableDirective &D,
const Expr *IfCond,
const Expr *Device) override;
/// Emit initialization for doacross loop nesting support.
/// \param D Loop-based construct used in doacross nesting construct.
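/// \code
/// // Illustrative runtime calls for doacross loops (abridged):
/// __kmpc_doacross_init(&loc, gtid, num_dims, &dims); // lo/up/st per dim
/// ...
/// __kmpc_doacross_wait(&loc, gtid, vec);             // depend(sink: ...)
/// __kmpc_doacross_post(&loc, gtid, vec);             // depend(source)
/// ...
/// __kmpc_doacross_fini(&loc, gtid);
/// \endcode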
void emitDoacrossInit(CodeGenFunction &CGF, const OMPLoopDirective &D,
ArrayRef<Expr *> NumIterations) override;
/// Emit code for doacross ordered directive with 'depend' clause.
/// \param C 'depend' clause with 'sink|source' dependency kind.
void emitDoacrossOrdered(CodeGenFunction &CGF,
const OMPDependClause *C) override;
/// Translates the native parameter of the outlined function if this is
/// required for the target.
/// \param FD Field decl from captured record for the parameter.
/// \param NativeParam Parameter itself.
const VarDecl *translateParameter(const FieldDecl *FD,
const VarDecl *NativeParam) const override;
/// Gets the address of the native argument based on the address of the
/// target-specific parameter.
/// \param NativeParam Parameter itself.
/// \param TargetParam Corresponding target-specific parameter.
Address getParameterAddress(CodeGenFunction &CGF, const VarDecl *NativeParam,
const VarDecl *TargetParam) const override;
/// Gets the OpenMP-specific address of the local variable.
Address getAddressOfLocalVariable(CodeGenFunction &CGF,
const VarDecl *VD) override {
return Address::invalid();
}
};
} // namespace CodeGen
} // namespace clang
#endif
|
outeronly2-orig-no.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
Only the outermost loop can be parallelized.
The inner loop carries a data dependence: iteration j reads b[i][j-1],
which iteration j-1 writes. Since the inner loop is not parallelized,
there is no race condition.
*/
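/*
For contrast, a hypothetical unsafe variant (not part of this benchmark):
parallelizing the inner loop instead, e.g.

  for (i=0;i<n;i++)
    #pragma omp parallel for
    for (j=1;j<m;j++)
      b[i][j]=b[i][j-1];

would let iteration j read b[i][j-1] while iteration j-1 writes it,
which is a data race.
*/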
int n=100, m=100;
double b[100][100];
void foo()
{
int i,j;
#pragma omp parallel for private(j)
for (i=0;i<n;i++)
for (j=1;j<m;j++) // Be careful about bounds of j
b[i][j]=b[i][j-1];
}
int main()
{
foo();
return 0;
}
|
psocpp.h | /** psocpp.h
*
* Author: Fabian Meyer
* Created on: 08 Jan 2019
* License: MIT
*/
#ifndef PSOCPP_PSOCPP_H_
#define PSOCPP_PSOCPP_H_
#include <Eigen/Geometry>
#include <stdexcept>
#include <limits>
#include <functional>
#include <iostream>
#include <sstream>   // std::stringstream in vector2str
#include <algorithm> // std::min / std::max
#include <cassert>   // assert in calculateVelocities
#include <ctime>
#include <iomanip>
#include <random>
namespace pso
{
/** Integer type for indexing arrays, vectors and matrices. */
typedef long int Index;
/** @brief Dummy callback functor, which always and only returns true. */
template<typename Scalar>
class NoCallback
{
public:
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic> Matrix;
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, 1> Vector;
bool operator()(const Index, const Matrix&, const Vector &, const Index) const
{
return true;
}
};
/** @brief Inertia weight functor, which returns a constant weight. */
template<typename Scalar>
class ConstantWeight
{
public:
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic> Matrix;
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, 1> Vector;
private:
Scalar weight_;
public:
ConstantWeight()
: ConstantWeight(1.0)
{ }
/** Constructor, which accepts the weight that is returned by the functor.
* @param weight constant which will be returned as inertia weight */
ConstantWeight(const Scalar weight)
: weight_(weight)
{ }
Scalar operator()(const Index,
const Index) const
{
return weight_;
}
};
/** @brief Inertia weight functor, which decreases linearly over time.
*
* The inertia weight is calculated by the following formula:
*
* w = wMin + (wMax - wMin) * (t / tMax) */
template<typename Scalar>
class LinearDecrease
{
public:
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic> Matrix;
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, 1> Vector;
private:
Scalar weightMin_;
Scalar weightMax_;
public:
LinearDecrease()
: LinearDecrease(0.4, 0.9)
{ }
/** @brief Constructor, which accepts the minimum and maximum weight of
* the linear decrease.
*
* The returned inertia weight always lies in the interval [minval, maxval].
* @param minval lower bound of the inertia weight
* @param maxval upper bound of the inertia weight */
LinearDecrease(const Scalar minval,
const Scalar maxval)
: weightMin_(minval), weightMax_(maxval)
{ }
Scalar operator()(const Index iteration,
const Index maxIt) const
{
Scalar factor = static_cast<Scalar>(iteration) / static_cast<Scalar>(maxIt);
return weightMin_ + (weightMax_ - weightMin_) * factor;
}
};
/** @brief Inertia weight functor, which decreases exponentially over time.
*
* The inertia weight is calculated by the following formula:
*
* w = wMin + (wMax - wMin) * exp(-t / (tMax / 10)) */
template<typename Scalar>
class ExponentialDecrease1
{
public:
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic> Matrix;
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, 1> Vector;
private:
Scalar weightMin_;
Scalar weightMax_;
public:
ExponentialDecrease1()
: ExponentialDecrease1(0.4, 0.9)
{ }
/** Constructor, which accepts the minimum and maximum weight of the
* exponential decrease.
* The returned inertia weight always lies in the interval [minval, maxval].
* @param minval lower bound of the inertia weight
* @param maxval upper bound of the inertia weight */
ExponentialDecrease1(const Scalar minval, const Scalar maxval)
: weightMin_(minval), weightMax_(maxval)
{ }
Scalar operator()(const Index iteration,
const Index maxIt) const
{
Scalar exponent = static_cast<Scalar>(iteration) / (static_cast<Scalar>(maxIt) / 10.0);
return weightMin_ + (weightMax_ - weightMin_) * std::exp(-exponent);
}
};
/** @brief Inertia weight functor, which decreases exponentially over time.
*
* The inertia weight is calculated by the following formula:
*
* w = wMin + (wMax - wMin) * exp(-(t / (tMax / 4))^2) */
template<typename Scalar>
class ExponentialDecrease2
{
public:
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic> Matrix;
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, 1> Vector;
private:
Scalar weightMin_;
Scalar weightMax_;
public:
ExponentialDecrease2()
: ExponentialDecrease2(0.4, 0.9)
{ }
/** Constructor, which accepts the minimum and maximum weight of the
* exponential decrease.
* The returned inertia weight always lies in the interval [minval, maxval].
* @param minval lower bound of the inertia weight
* @param maxval upper bound of the inertia weight */
ExponentialDecrease2(const Scalar minval, const Scalar maxval)
: weightMin_(minval), weightMax_(maxval)
{ }
Scalar operator()(const Index iteration,
const Index maxIt) const
{
Scalar exponent = static_cast<Scalar>(iteration) / (static_cast<Scalar>(maxIt) / 4.0);
exponent *= exponent;
return weightMin_ + (weightMax_ - weightMin_) * std::exp(-exponent);
}
};
/** @brief Inertia weight functor, which decreases exponentially over time.
*
* The inertia weight is calculated by the following formula:
*
* w = (wMax - wMin - d1) * exp(1 / (1 + d2 t / tMax)) */
template<typename Scalar>
class ExponentialDecrease3
{
public:
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic> Matrix;
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, 1> Vector;
private:
Scalar weightMin_;
Scalar weightMax_;
/** Control factors */
Scalar d1_;
Scalar d2_;
public:
ExponentialDecrease3()
: ExponentialDecrease3(0.4, 0.95, 0.2, 7.0)
{ }
/** Constructor, which accepts the minimum and maximum weight and two
* control factors of the exponential decrease.
* The returned inertia weight always lies in the interval [minval, maxval].
* @param minval lower bound of the inertia weight
* @param maxval upper bound of the inertia weight
* @param d1 first control factor
* @param d2 second control factor */
ExponentialDecrease3(const Scalar minval,
const Scalar maxval,
const Scalar d1,
const Scalar d2)
: weightMin_(minval), weightMax_(maxval), d1_(d1), d2_(d2)
{ }
Scalar operator()(const Index iteration,
const Index maxIt) const
{
Scalar itFac = static_cast<Scalar>(iteration) / static_cast<Scalar>(maxIt);
Scalar exponent = 1.0 / (1.0 + d2_ * itFac);
return (weightMax_ - weightMin_ - d1_) * std::exp(exponent);
}
};
/** @brief Implements the particle swarm optimization algorithm.
*
* The optimization process can be configured by providing an inertia
* weight strategy functor and a callback.
*
* The inertia weight functor determines the amount of velocity that is
* maintained from the previous iterations. It has a huge effect on the
* convergence speed and stability of the optimization.
*
* The callback functor is called after each iteration and returns a boolean.
* If it returns false, the optimization process is stopped. As such, the
* callback allows implementing additional stop criteria.
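*
* A minimal usage sketch (the Paraboloid functor below is hypothetical and
* only serves as an example objective):
*
* @code
* struct Paraboloid
* {
*     template<typename Derived>
*     double operator()(const Eigen::MatrixBase<Derived> &x) const
*     { return x.squaredNorm(); }
* };
*
* pso::ParticleSwarmOptimization<double, Paraboloid> opt;
* opt.setMaxIterations(100);
*
* Eigen::MatrixXd bounds(2, 2);
* bounds << -5, -5,   // row 0: per-dimension minimum
*            5,  5;   // row 1: per-dimension maximum
*
* auto result = opt.minimize(bounds, 40); // swarm of 40 particles
* // result.xval holds the best particle, result.fval its function value
* @endcode */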
template<typename Scalar,
typename Objective,
typename InertiaWeightStrategy = ConstantWeight<Scalar>,
typename Callback = NoCallback<Scalar> >
class ParticleSwarmOptimization
{
public:
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic> Matrix;
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, 1> Vector;
struct Result
{
Index iterations;
bool converged;
Scalar fval;
Vector xval;
};
private:
Objective objective_;
Callback callback_;
InertiaWeightStrategy weightStrategy_;
Index threads_;
Index maxIt_;
Scalar xeps_;
Scalar feps_;
Scalar phip_;
Scalar phig_;
Scalar maxVel_;
Index verbosity_;
std::function<Scalar()> dice_;
template<typename Derived>
std::string vector2str(const Eigen::MatrixBase<Derived> &vec) const
{
std::stringstream ss1;
ss1 << std::fixed << std::showpoint << std::setprecision(6);
std::stringstream ss2;
ss2 << '[';
for(Index i = 0; i < vec.size(); ++i)
{
ss1 << vec(i);
ss2 << std::setfill(' ') << std::setw(10) << ss1.str();
if(i != vec.size() - 1)
ss2 << ' ';
ss1.str("");
}
ss2 << ']';
return ss2.str();
}
void randomizeParticles(const Matrix &bounds, Matrix &particles)
{
for(Index i = 0; i < particles.cols(); ++i)
{
for(Index j = 0; j < particles.rows(); ++j)
{
Scalar minval = bounds(0, j);
Scalar maxval = bounds(1, j);
Scalar diff = maxval - minval;
particles(j, i) = minval + (dice_() * diff);
}
}
}
void randomizeVelocities(const Matrix &bounds, Matrix &velocities)
{
for(Index i = 0; i < velocities.cols(); ++i)
{
for(Index j = 0; j < velocities.rows(); ++j)
{
Scalar minval = bounds(0, j);
Scalar maxval = bounds(1, j);
Scalar diff = maxval - minval;
Scalar vel = -diff + (dice_() * 2 * diff);
velocities(j, i) = std::min(maxVel_, std::max(-maxVel_, vel));
}
}
}
void evaluateObjective(const Matrix &particles,
Vector &fvals)
{
#pragma omp parallel for num_threads(threads_)
for(Index i = 0; i < particles.cols(); ++i)
fvals(i) = objective_(particles.col(i));
}
void maintainBounds(const Matrix &bounds, Matrix &particles) const
{
for(Index i = 0; i < particles.cols(); ++i)
{
for(Index j = 0; j < particles.rows(); ++j)
{
Scalar minval = bounds(0, j);
Scalar maxval = bounds(1, j);
Scalar val = particles(j, i);
particles(j, i) = std::min(maxval, std::max(minval, val));
}
}
}
void calculateVelocities(const Matrix &particles,
const Matrix &bestParticles,
const Index gbest,
const Index iteration,
Matrix &velocities)
{
assert(velocities.rows() == particles.rows());
assert(velocities.cols() == particles.cols());
assert(velocities.rows() == bestParticles.rows());
assert(velocities.cols() == bestParticles.cols());
assert(gbest < bestParticles.cols());
Scalar weight = weightStrategy_(iteration, maxIt_);
for(Index i = 0; i < velocities.cols(); ++i)
{
for(Index j = 0; j < velocities.rows(); ++j)
{
Scalar velp = dice_() * (bestParticles(j, i) - particles(j, i));
Scalar velg = dice_() * (bestParticles(j, gbest) - particles(j, i));
Scalar vel = weight * velocities(j, i) + phip_ * velp + phig_ * velg;
if(maxVel_ > 0)
vel = std::min(maxVel_, std::max(-maxVel_, vel));
velocities(j, i) = vel;
}
}
}
Result _minimize(const Matrix &bounds,
Matrix &particles)
{
Matrix velocities(particles.rows(), particles.cols());
Vector fvals(particles.cols());
Matrix bestParticles = particles;
Vector bestFvals(particles.cols());
Matrix prevParticles(particles.rows(), particles.cols());
Vector prevFvals(particles.cols());
Vector diff(particles.rows());
Index gbest = 0;
// initialize velocities randomly
randomizeVelocities(bounds, velocities);
// evaluate objective function for the initial particles
evaluateObjective(particles, fvals);
bestFvals = fvals;
bestFvals.minCoeff(&gbest);
// init stop conditions
Index iterations = 0;
Scalar fchange = feps_ + 1;
Scalar xchange = xeps_ + 1;
while((maxIt_ == 0 || iterations < maxIt_) &&
fchange > feps_ && xchange > xeps_)
{
// calculate new velocities
calculateVelocities(particles, bestParticles, gbest, iterations, velocities);
// move particles by velocity and stay within bounds
particles += velocities;
maintainBounds(bounds, particles);
// evaluate objective for moved particles
evaluateObjective(particles, fvals);
prevParticles = bestParticles;
prevFvals = bestFvals;
for(Index i = 0; i < fvals.size(); ++i)
{
// check if there was an improvement and update best vals
if(fvals(i) < bestFvals(i))
{
bestFvals(i) = fvals(i);
bestParticles.col(i) = particles.col(i);
}
}
bestFvals.minCoeff(&gbest);
// calculate new diffs
xchange = (bestParticles - prevParticles).colwise().norm().sum();
fchange = (bestFvals - prevFvals).array().abs().sum();
xchange /= bestParticles.cols();
fchange /= bestFvals.size();
// evaluate callback and save its result
bool callbackResult = callback_(iterations, bestParticles,
bestFvals, gbest);
if(verbosity_ > 0)
{
std::stringstream ss;
ss << "it=" << std::setfill('0')
<< std::setw(4) << iterations
<< std::fixed << std::showpoint << std::setprecision(6)
<< " fchange=" << fchange
<< " xchange=" << xchange;
if(verbosity_ > 2)
ss << " callback=" << (callbackResult ? "true" : "false");
ss << " fval=" << bestFvals(gbest);
if(verbosity_ > 1)
ss << " xval=" << vector2str(bestParticles.col(gbest));
std::cout << ss.str() << std::endl;
}
++iterations;
}
Result result;
result.iterations = iterations;
result.converged = fchange <= feps_ || xchange <= xeps_;
result.fval = bestFvals(gbest);
result.xval = bestParticles.col(gbest);
return result;
}
public:
ParticleSwarmOptimization()
: objective_(), callback_(), weightStrategy_(), threads_(1),
maxIt_(0), xeps_(static_cast<Scalar>(1e-6)),
feps_(static_cast<Scalar>(1e-6)), phip_(static_cast<Scalar>(2.0)),
phig_(static_cast<Scalar>(2.0)), maxVel_(static_cast<Scalar>(0.0)),
verbosity_(0), dice_()
{
std::default_random_engine gen(std::time(0));
std::uniform_real_distribution<Scalar> distrib(0.0, 1.0);
dice_ = std::bind(distrib, gen);
}
/** Set the number of threads used for evaluating the
* individual particles (OpenMP only).
* Set to 0 or negative to allow auto detection.
* @param threads maximum number of threads for evaluation */
void setThreads(const Index threads)
{
threads_ = threads;
}
/** Set the maximum number of iterations.
* Set to 0 or negative for infinite iterations.
* @param iterations maximum number of iterations */
void setMaxIterations(const Index iterations)
{
maxIt_ = iterations;
}
/** Set the minimum average change of particles per iteration.
* If the average change of particles (input parameters) falls below
* this value, the optimization terminates.
* @param change minimum change of input parameters */
void setMinParticleChange(const Scalar change)
{
xeps_ = change;
}
/** Set the minimum average change of function values per iteration.
* If the average change of function values falls below
* this value, the optimization terminates.
* @param change minimum change of function values */
void setMinFunctionChange(const Scalar change)
{
feps_ = change;
}
/** Set the tendency of particles to move towards their local optimum
* found so far.
* Each particle individually maintains a memory of where it has
* visited the lowest function value so far.
* Increasing this value increases the particles' tendency to move
* towards that point.
* @param phip tendency to move towards individual optimum */
void setPhiParticles(const Scalar phip)
{
phip_ = phip;
}
/** Set the tendency of particles to move towards the global optimum
* found so far.
* The swarm maintains a collective memory of where it has visited the
* lowest function value so far.
* Increasing this value increases the particles' tendency to move
* towards that point.
* @param phig tendency to move towards collective optimum */
void setPhiGlobal(const Scalar phig)
{
phig_ = phig;
}
/** Set an upper bound for the velocity of particles.
* A particle cannot move faster than this value, which may prevent
* divergence.
* @param maxvel maximum velocity of a particle */
void setMaxVelocity(const Scalar maxvel)
{
maxVel_ = maxvel;
}
/** Set the level of verbosity during optimization.
* Verbosity increases with increasing value.
* 0 means no output and it can be raised up to level 3.
* @param verbosity level of verbosity */
void setVerbosity(const Index verbosity)
{
verbosity_ = verbosity;
}
void setObjective(const Objective &objective)
{
objective_ = objective;
}
void setCallback(const Callback &callback)
{
callback_ = callback;
}
void setInertiaWeightStrategy(const InertiaWeightStrategy &weightStrategy)
{
weightStrategy_ = weightStrategy;
}
/** Perform minimization with the given bounds and number of particles.
*
* The swarm of particles will be drawn uniformly at random within the
* given bounds.
*
* The bounds matrix has to have 2 rows and one column per dimension
* of particle. The first row holds the minimum value of the respective
* dimension and the second row holds the maximum value.
*
* @param bounds 2xM matrix for bounds of M-dimensional particles
* @param cnt number of particles used for optimization */
Result minimize(const Matrix &bounds,
const Index cnt)
{
if(cnt == 0)
throw std::runtime_error("particle count cannot be 0");
if(bounds.rows() != 2)
throw std::runtime_error("bounds has not exactly 2 rows (min, max)");
for(Index i = 0; i < bounds.cols(); ++i)
{
if(bounds(0, i) >= bounds(1, i))
throw std::runtime_error("bounds min is greater than max");
}
Matrix particles(bounds.cols(), cnt);
randomizeParticles(bounds, particles);
return _minimize(bounds, particles);
}
/** Perform minimization with the given bounds, number of particles and
* initial guess.
*
* The swarm of particles will be drawn uniformly at random within the
* given bounds.
*
* The bounds matrix has to have 2 rows and one column per dimension
* of particle. The first row holds the minimum value of the respective
* dimension and the second row holds the maximum value.
*
* The initial guess vector has to have the same length as the number
* of columns of the bounds. It will be included as one particle of
* the swarm.
*
* @param bounds 2xM matrix for bounds of M-dimensional particles
* @param cnt number of particles used for optimization
* @param initGuess initial guess for a particle */
Result minimize(const Matrix &bounds,
const Index cnt,
const Vector &initGuess)
{
if(cnt == 0)
throw std::runtime_error("particle count cannot be 0");
if(bounds.rows() != 2)
throw std::runtime_error("bounds has not exactly 2 rows (min, max)");
for(Index i = 0; i < bounds.cols(); ++i)
{
if(bounds(0, i) >= bounds(1, i))
throw std::runtime_error("bounds min is greater than max");
}
if(bounds.cols() != initGuess.size())
throw std::runtime_error("init guess and bounds have different dimensions");
Matrix particles(bounds.cols(), cnt);
randomizeParticles(bounds, particles);
particles.col(0) = initGuess;
maintainBounds(bounds, particles);
return _minimize(bounds, particles);
}
/** Perform minimization with the given bounds and a pre-computed
* swarm of particles.
*
* The bounds matrix has to have 2 rows and one column per dimension
* of particle. The first row holds the minimum value of the respective
* dimension and the second row holds the maximum value.
*
* @param bounds 2xM matrix for bounds of M-dimensional particles
* @param particles initial swarm used for optimization */
Result minimize(const Matrix &bounds,
Matrix &particles)
{
if(bounds.rows() != 2)
throw std::runtime_error("bounds has not exactly 2 rows (min, max)");
if(bounds.cols() != particles.rows())
throw std::runtime_error("columns of bounds and rows of "
"particles do not match");
for(Index i = 0; i < bounds.cols(); ++i)
{
if(bounds(0, i) >= bounds(1, i))
throw std::runtime_error("bounds min is greater than max");
}
maintainBounds(bounds, particles);
return _minimize(bounds, particles);
}
void getRandomParticles(const Matrix &bounds,
const Index cnt,
Matrix &particles)
{
particles.resize(bounds.cols(), cnt);
randomizeParticles(bounds, particles);
}
};
}
#endif
|
3d7pt_var.lbpar.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Order-1, 3D 7 point stencil with variable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
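/*
 * For reference, each time step of the tiled CLooG code below performs
 * the following naive update (a sketch; the generated loops compute the
 * same thing under time-skewed tiling):
 *
 *   for (i = 1; i < Nz-1; i++)
 *     for (j = 1; j < Ny-1; j++)
 *       for (k = 1; k < Nx-1; k++)
 *         A[(t+1)%2][i][j][k] =
 *             coef[0][i][j][k] * A[t%2][i][j][k]
 *           + coef[1][i][j][k] * A[t%2][i-1][j][k]
 *           + coef[2][i][j][k] * A[t%2][i][j-1][k]
 *           + coef[3][i][j][k] * A[t%2][i][j][k-1]
 *           + coef[4][i][j][k] * A[t%2][i+1][j][k]
 *           + coef[5][i][j][k] * A[t%2][i][j+1][k]
 *           + coef[6][i][j][k] * A[t%2][i][j][k+1];
 */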
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
/* Perform the carry for the later subtraction by updating y. */
if (x->tv_usec < y->tv_usec)
{
int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
y->tv_usec -= 1000000 * nsec;
y->tv_sec += nsec;
}
if (x->tv_usec - y->tv_usec > 1000000)
{
int nsec = (x->tv_usec - y->tv_usec) / 1000000;
y->tv_usec += 1000000 * nsec;
y->tv_sec -= nsec;
}
/* Compute the time remaining to wait.
* tv_usec is certainly positive.
*/
result->tv_sec = x->tv_sec - y->tv_sec;
result->tv_usec = x->tv_usec - y->tv_usec;
/* Return 1 if result is negative. */
return x->tv_sec < y->tv_sec;
}
int main(int argc, char *argv[])
{
int t, i, j, k, m, test;
int Nx = 100, Ny = 100, Nz = 100, Nt = 10; /* defaults, used when not set via argv (avoids uninitialized use) */
if (argc > 3) {
Nx = atoi(argv[1])+2;
Ny = atoi(argv[2])+2;
Nz = atoi(argv[3])+2;
}
if (argc > 4)
Nt = atoi(argv[4]);
// allocate the arrays
double ****A = (double ****) malloc(sizeof(double***)*2);
for(m=0; m<2;m++){
A[m] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[m][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
}
double ****coef = (double ****) malloc(sizeof(double***)*7);
for(m=0; m<7;m++){
coef[m] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 4;
tile_size[1] = 4;
tile_size[2] = 24;
tile_size[3] = 128;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
// initialize variables
//
srand(42);
for (i = 1; i < Nz; i++) {
for (j = 1; j < Ny; j++) {
for (k = 1; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
for (m=0; m<7; m++) {
for (i=1; i<Nz; i++) {
for (j=1; j<Ny; j++) {
for (k=1; k<Nx; k++) {
coef[m][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// stencil computation - 6 additions and 7 multiplications per point
int t1, t2, t3, t4, t5, t6, t7, t8;
int lb, ub, lbp, ubp, lb2, ub2;
register int lbv, ubv;
/* Start of CLooG code */
if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) {
for (t1=-1;t1<=floord(Nt-2,2);t1++) {
lbp=max(ceild(t1,2),ceild(4*t1-Nt+3,4));
ubp=min(floord(Nt+Nz-4,4),floord(2*t1+Nz-1,4));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
for (t2=lbp;t2<=ubp;t2++) {
for (t3=max(max(0,ceild(t1-11,12)),ceild(4*t2-Nz-20,24));t3<=min(min(min(floord(4*t2+Ny,24),floord(Nt+Ny-4,24)),floord(2*t1+Ny+1,24)),floord(4*t1-4*t2+Nz+Ny-1,24));t3++) {
for (t4=max(max(max(0,ceild(t1-63,64)),ceild(4*t2-Nz-124,128)),ceild(24*t3-Ny-124,128));t4<=min(min(min(min(floord(4*t2+Nx,128),floord(Nt+Nx-4,128)),floord(2*t1+Nx+1,128)),floord(24*t3+Nx+20,128)),floord(4*t1-4*t2+Nz+Nx-1,128));t4++) {
for (t5=max(max(max(max(max(0,2*t1),4*t1-4*t2+1),4*t2-Nz+2),24*t3-Ny+2),128*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,2*t1+3),4*t2+2),24*t3+22),128*t4+126),4*t1-4*t2+Nz+1);t5++) {
for (t6=max(max(4*t2,t5+1),-4*t1+4*t2+2*t5-3);t6<=min(min(4*t2+3,-4*t1+4*t2+2*t5),t5+Nz-2);t6++) {
for (t7=max(24*t3,t5+1);t7<=min(24*t3+23,t5+Ny-2);t7++) {
lbv=max(128*t4,t5+1);
ubv=min(128*t4+127,t5+Nx-2);
#pragma ivdep
#pragma vector always
for (t8=lbv;t8<=ubv;t8++) {
A[(t5+1) % 2][-t5+t6][-t5+t7][-t5+t8] =
    coef[0][-t5+t6][-t5+t7][-t5+t8] * A[t5 % 2][-t5+t6][-t5+t7][-t5+t8]
  + coef[1][-t5+t6][-t5+t7][-t5+t8] * A[t5 % 2][-t5+t6-1][-t5+t7][-t5+t8]
  + coef[2][-t5+t6][-t5+t7][-t5+t8] * A[t5 % 2][-t5+t6][-t5+t7-1][-t5+t8]
  + coef[3][-t5+t6][-t5+t7][-t5+t8] * A[t5 % 2][-t5+t6][-t5+t7][-t5+t8-1]
  + coef[4][-t5+t6][-t5+t7][-t5+t8] * A[t5 % 2][-t5+t6+1][-t5+t7][-t5+t8]
  + coef[5][-t5+t6][-t5+t7][-t5+t8] * A[t5 % 2][-t5+t6][-t5+t7+1][-t5+t8]
  + coef[6][-t5+t6][-t5+t7][-t5+t8] * A[t5 % 2][-t5+t6][-t5+t7][-t5+t8+1];
}
}
}
}
}
}
}
}
}
/* End of CLooG code */
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = min(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(1, "variable no-symmetry")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
}
free(A[0][i]);
free(A[1][i]);
}
free(A[0]);
free(A[1]);
for(m=0; m<7;m++){
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(coef[m][i][j]);
}
free(coef[m][i]);
}
free(coef[m]);
}
return 0;
}
|
distribute_parallel_for_simd_misc_messages.c | // RUN: %clang_cc1 -fsyntax-only -fopenmp -fopenmp-version=45 -verify=expected,omp45 %s -Wuninitialized
// RUN: %clang_cc1 -fsyntax-only -fopenmp -verify=expected,omp50 %s -Wuninitialized
// RUN: %clang_cc1 -fsyntax-only -fopenmp-simd -fopenmp-version=45 -verify=expected,omp45 %s -Wuninitialized
// RUN: %clang_cc1 -fsyntax-only -fopenmp-simd -verify=expected,omp50 %s -Wuninitialized
void xxx(int argc) {
int x; // expected-note {{initialize the variable 'x' to silence this warning}}
#pragma omp distribute parallel for simd
for (int i = 0; i < 10; ++i)
argc = x; // expected-warning {{variable 'x' is uninitialized when used here}}
}
// expected-error@+1 {{unexpected OpenMP directive '#pragma omp distribute parallel for simd'}}
#pragma omp distribute parallel for simd
// expected-error@+1 {{unexpected OpenMP directive '#pragma omp distribute parallel for simd'}}
#pragma omp distribute parallel for simd foo
void test_no_clause() {
int i;
#pragma omp distribute parallel for simd
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{statement after '#pragma omp distribute parallel for simd' must be a for loop}}
#pragma omp distribute parallel for simd
++i;
}
void test_branch_protected_scope() {
int i = 0;
L1:
++i;
int x[24];
#pragma omp target
#pragma omp teams
#pragma omp distribute parallel for simd
for (i = 0; i < 16; ++i) {
if (i == 5)
goto L1; // expected-error {{use of undeclared label 'L1'}}
else if (i == 6)
return; // expected-error {{cannot return from OpenMP region}}
else if (i == 7)
goto L2;
else if (i == 8) {
L2:
x[i]++;
}
}
if (x[0] == 0)
goto L2; // expected-error {{use of undeclared label 'L2'}}
else if (x[1] == 1)
goto L1;
}
void test_invalid_clause() {
int i;
#pragma omp target
#pragma omp teams
// expected-warning@+1 {{extra tokens at the end of '#pragma omp distribute parallel for simd' are ignored}}
#pragma omp distribute parallel for simd foo bar
for (i = 0; i < 16; ++i)
;
}
void test_non_identifiers() {
int i, x;
#pragma omp target
#pragma omp teams
// expected-warning@+1 {{extra tokens at the end of '#pragma omp distribute parallel for simd' are ignored}}
#pragma omp distribute parallel for simd;
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-warning@+1 {{extra tokens at the end of '#pragma omp distribute parallel for simd' are ignored}}
#pragma omp distribute parallel for simd firstprivate(x);
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-warning@+1 {{extra tokens at the end of '#pragma omp distribute parallel for simd' are ignored}}
#pragma omp distribute parallel for simd private(x);
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-warning@+1 {{extra tokens at the end of '#pragma omp distribute parallel for simd' are ignored}}
#pragma omp distribute parallel for simd, private(x);
for (i = 0; i < 16; ++i)
;
}
extern int foo();
void test_safelen() {
int i;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected '('}}
#pragma omp distribute parallel for simd safelen
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd safelen(
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected expression}}
#pragma omp distribute parallel for simd safelen()
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd safelen(,
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd safelen(, )
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-warning@+2 {{extra tokens at the end of '#pragma omp distribute parallel for simd' are ignored}}
// expected-error@+1 {{expected '('}}
#pragma omp distribute parallel for simd safelen 4)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd safelen(4
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd safelen(4,
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd safelen(4, )
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
#pragma omp distribute parallel for simd safelen(4)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd safelen(4 4)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd safelen(4, , 4)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
#pragma omp distribute parallel for simd safelen(4)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd safelen(4, 8)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{integer constant expression}}
#pragma omp distribute parallel for simd safelen(2.5)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{integer constant expression}}
#pragma omp distribute parallel for simd safelen(foo())
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{argument to 'safelen' clause must be a strictly positive integer value}}
#pragma omp distribute parallel for simd safelen(-5)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{argument to 'safelen' clause must be a strictly positive integer value}}
#pragma omp distribute parallel for simd safelen(0)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{argument to 'safelen' clause must be a strictly positive integer value}}
#pragma omp distribute parallel for simd safelen(5 - 5)
for (i = 0; i < 16; ++i)
;
}
void test_simdlen() {
int i;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected '('}}
#pragma omp distribute parallel for simd simdlen
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd simdlen(
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected expression}}
#pragma omp distribute parallel for simd simdlen()
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd simdlen(,
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd simdlen(, )
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-warning@+2 {{extra tokens at the end of '#pragma omp distribute parallel for simd' are ignored}}
// expected-error@+1 {{expected '('}}
#pragma omp distribute parallel for simd simdlen 4)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd simdlen(4
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd simdlen(4,
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd simdlen(4, )
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
#pragma omp distribute parallel for simd simdlen(4)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd simdlen(4 4)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd simdlen(4, , 4)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
#pragma omp distribute parallel for simd simdlen(4)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd simdlen(4, 8)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{integer constant expression}}
#pragma omp distribute parallel for simd simdlen(2.5)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{integer constant expression}}
#pragma omp distribute parallel for simd simdlen(foo())
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{argument to 'simdlen' clause must be a strictly positive integer value}}
#pragma omp distribute parallel for simd simdlen(-5)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{argument to 'simdlen' clause must be a strictly positive integer value}}
#pragma omp distribute parallel for simd simdlen(0)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{argument to 'simdlen' clause must be a strictly positive integer value}}
#pragma omp distribute parallel for simd simdlen(5 - 5)
for (i = 0; i < 16; ++i)
;
}
void test_safelen_simdlen() {
int i;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{the value of 'simdlen' parameter must be less than or equal to the value of the 'safelen' parameter}}
#pragma omp distribute parallel for simd simdlen(6) safelen(5)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{the value of 'simdlen' parameter must be less than or equal to the value of the 'safelen' parameter}}
#pragma omp distribute parallel for simd safelen(5) simdlen(6)
for (i = 0; i < 16; ++i)
;
}
void test_collapse() {
int i;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected '('}}
#pragma omp distribute parallel for simd collapse
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd collapse(
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected expression}}
#pragma omp distribute parallel for simd collapse()
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd collapse(,
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd collapse(, )
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-warning@+2 {{extra tokens at the end of '#pragma omp distribute parallel for simd' are ignored}}
// expected-error@+1 {{expected '('}}
#pragma omp distribute parallel for simd collapse 4)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp distribute parallel for simd collapse(4
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp distribute parallel for simd', but found only 1}}
#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp distribute parallel for simd collapse(4,
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp distribute parallel for simd', but found only 1}}
#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp distribute parallel for simd collapse(4, )
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp distribute parallel for simd', but found only 1}}
#pragma omp target
#pragma omp teams
// expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp distribute parallel for simd collapse(4)
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp distribute parallel for simd', but found only 1}}
#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp distribute parallel for simd collapse(4 4)
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp distribute parallel for simd', but found only 1}}
#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp distribute parallel for simd collapse(4, , 4)
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp distribute parallel for simd', but found only 1}}
#pragma omp target
#pragma omp teams
#pragma omp distribute parallel for simd collapse(4)
for (int i1 = 0; i1 < 16; ++i1)
for (int i2 = 0; i2 < 16; ++i2)
for (int i3 = 0; i3 < 16; ++i3)
for (int i4 = 0; i4 < 16; ++i4)
foo();
#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp distribute parallel for simd collapse(4, 8)
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp distribute parallel for simd', but found only 1}}
#pragma omp target
#pragma omp teams
// expected-error@+1 {{integer constant expression}}
#pragma omp distribute parallel for simd collapse(2.5)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{integer constant expression}}
#pragma omp distribute parallel for simd collapse(foo())
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}}
#pragma omp distribute parallel for simd collapse(-5)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}}
#pragma omp distribute parallel for simd collapse(0)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}}
#pragma omp distribute parallel for simd collapse(5 - 5)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
#pragma omp distribute parallel for simd collapse(2)
for (i = 0; i < 16; ++i)
for (int j = 0; j < 16; ++j)
// expected-error@+1 {{OpenMP constructs may not be nested inside a simd region}}
#pragma omp distribute parallel for simd reduction(+ : i, j)
for (int k = 0; k < 16; ++k)
i += j;
}
void test_linear() {
int i;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd linear(
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected expression}}
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd linear(,
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected expression}}
// expected-error@+1 {{expected expression}}
#pragma omp distribute parallel for simd linear(, )
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected expression}}
#pragma omp distribute parallel for simd linear()
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected expression}}
#pragma omp distribute parallel for simd linear(int)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected variable name}}
#pragma omp distribute parallel for simd linear(0)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{use of undeclared identifier 'x'}}
#pragma omp distribute parallel for simd linear(x)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+2 {{use of undeclared identifier 'x'}}
// expected-error@+1 {{use of undeclared identifier 'y'}}
#pragma omp distribute parallel for simd linear(x, y)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+3 {{use of undeclared identifier 'x'}}
// expected-error@+2 {{use of undeclared identifier 'y'}}
// expected-error@+1 {{use of undeclared identifier 'z'}}
#pragma omp distribute parallel for simd linear(x, y, z)
for (i = 0; i < 16; ++i)
;
}
void test_aligned() {
int i;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd aligned(
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected expression}}
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd aligned(,
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected expression}}
// expected-error@+1 {{expected expression}}
#pragma omp distribute parallel for simd aligned(, )
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected expression}}
#pragma omp distribute parallel for simd aligned()
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected expression}}
#pragma omp distribute parallel for simd aligned(int)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected variable name}}
#pragma omp distribute parallel for simd aligned(0)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{use of undeclared identifier 'x'}}
#pragma omp distribute parallel for simd aligned(x)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+2 {{use of undeclared identifier 'x'}}
// expected-error@+1 {{use of undeclared identifier 'y'}}
#pragma omp distribute parallel for simd aligned(x, y)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+3 {{use of undeclared identifier 'x'}}
// expected-error@+2 {{use of undeclared identifier 'y'}}
// expected-error@+1 {{use of undeclared identifier 'z'}}
#pragma omp distribute parallel for simd aligned(x, y, z)
for (i = 0; i < 16; ++i)
;
int *x, y, z[25]; // expected-note 4 {{'y' defined here}}
#pragma omp target
#pragma omp teams
#pragma omp distribute parallel for simd aligned(x)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
#pragma omp distribute parallel for simd aligned(z)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected expression}}
#pragma omp distribute parallel for simd aligned(x :)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd aligned(x :, )
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
#pragma omp distribute parallel for simd aligned(x : 1)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
#pragma omp distribute parallel for simd aligned(x : 2 * 2)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd aligned(x : 1, y)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd aligned(x : 1, y, z : 1)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{argument of aligned clause should be array or pointer, not 'int'}}
#pragma omp distribute parallel for simd aligned(x, y)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{argument of aligned clause should be array or pointer, not 'int'}}
#pragma omp distribute parallel for simd aligned(x, y, z)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-note@+2 {{defined as aligned}}
// expected-error@+1 {{a variable cannot appear in more than one aligned clause}}
#pragma omp distribute parallel for simd aligned(x) aligned(z, x)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-note@+3 {{defined as aligned}}
// expected-error@+2 {{a variable cannot appear in more than one aligned clause}}
// expected-error@+1 2 {{argument of aligned clause should be array or pointer, not 'int'}}
#pragma omp distribute parallel for simd aligned(x, y, z) aligned(y, z)
for (i = 0; i < 16; ++i)
;
}
void test_private() {
int i;
#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected expression}}
// expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd private(
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp distribute parallel for simd private(,
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 2 {{expected expression}}
#pragma omp distribute parallel for simd private(, )
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected expression}}
#pragma omp distribute parallel for simd private()
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected expression}}
#pragma omp distribute parallel for simd private(int)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected variable name}}
#pragma omp distribute parallel for simd private(0)
for (i = 0; i < 16; ++i)
;
int x, y, z;
#pragma omp target
#pragma omp teams
#pragma omp distribute parallel for simd private(x)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
#pragma omp distribute parallel for simd private(x, y)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
#pragma omp distribute parallel for simd private(x, y, z)
for (i = 0; i < 16; ++i) {
x = y * i + z;
}
}
void test_lastprivate() {
int i;
#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 {{expected expression}}
#pragma omp distribute parallel for simd lastprivate(
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp distribute parallel for simd lastprivate(,
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 2 {{expected expression}}
#pragma omp distribute parallel for simd lastprivate(, )
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected expression}}
#pragma omp distribute parallel for simd lastprivate()
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected expression}}
#pragma omp distribute parallel for simd lastprivate(int)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected variable name}}
#pragma omp distribute parallel for simd lastprivate(0)
for (i = 0; i < 16; ++i)
;
int x, y, z;
#pragma omp target
#pragma omp teams
#pragma omp distribute parallel for simd lastprivate(x)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
#pragma omp distribute parallel for simd lastprivate(x, y)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
#pragma omp distribute parallel for simd lastprivate(x, y, z)
for (i = 0; i < 16; ++i)
;
}
void test_firstprivate() {
int i;
#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 {{expected expression}}
#pragma omp distribute parallel for simd firstprivate(
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp distribute parallel for simd firstprivate(,
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 2 {{expected expression}}
#pragma omp distribute parallel for simd firstprivate(, )
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected expression}}
#pragma omp distribute parallel for simd firstprivate()
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected expression}}
#pragma omp distribute parallel for simd firstprivate(int)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected variable name}}
#pragma omp distribute parallel for simd firstprivate(0)
for (i = 0; i < 16; ++i)
;
int x, y, z;
// expected-error@+3 {{lastprivate variable cannot be firstprivate}} expected-note@+3 {{defined as lastprivate}}
#pragma omp target
#pragma omp teams
#pragma omp distribute parallel for simd lastprivate(x) firstprivate(x)
for (i = 0; i < 16; ++i)
;
// expected-error@+3 2 {{lastprivate variable cannot be firstprivate}} expected-note@+3 2 {{defined as lastprivate}}
#pragma omp target
#pragma omp teams
#pragma omp distribute parallel for simd lastprivate(x, y) firstprivate(x, y)
for (i = 0; i < 16; ++i)
;
// expected-error@+3 3 {{lastprivate variable cannot be firstprivate}} expected-note@+3 3 {{defined as lastprivate}}
#pragma omp target
#pragma omp teams
#pragma omp distribute parallel for simd lastprivate(x, y, z) firstprivate(x, y, z)
for (i = 0; i < 16; ++i)
;
}
void test_loop_messages() {
float a[100], b[100], c[100];
#pragma omp target
#pragma omp teams
// expected-error@+2 {{variable must be of integer or pointer type}}
#pragma omp distribute parallel for simd
for (float fi = 0; fi < 10.0; fi++) {
c[(int)fi] = a[(int)fi] + b[(int)fi];
}
#pragma omp target
#pragma omp teams
// expected-error@+2 {{variable must be of integer or pointer type}}
#pragma omp distribute parallel for simd
for (double fi = 0; fi < 10.0; fi++) {
c[(int)fi] = a[(int)fi] + b[(int)fi];
}
}
void test_nontemporal() {
int i;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp distribute parallel for simd'}} expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd nontemporal(
for (i = 0; i < 16; ++i)
;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp distribute parallel for simd'}} expected-error@+1 2 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd nontemporal(,
for (i = 0; i < 16; ++i)
;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp distribute parallel for simd'}} expected-error@+1 2 {{expected expression}}
#pragma omp distribute parallel for simd nontemporal(, )
for (i = 0; i < 16; ++i)
;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp distribute parallel for simd'}} expected-error@+1 {{expected expression}}
#pragma omp distribute parallel for simd nontemporal()
for (i = 0; i < 16; ++i)
;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp distribute parallel for simd'}} expected-error@+1 {{expected expression}}
#pragma omp distribute parallel for simd nontemporal(int)
for (i = 0; i < 16; ++i)
;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp distribute parallel for simd'}} omp50-error@+1 {{expected variable name}}
#pragma omp distribute parallel for simd nontemporal(0)
for (i = 0; i < 16; ++i)
;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp distribute parallel for simd'}} expected-error@+1 {{use of undeclared identifier 'x'}}
#pragma omp distribute parallel for simd nontemporal(x)
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{use of undeclared identifier 'x'}}
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp distribute parallel for simd'}} expected-error@+1 {{use of undeclared identifier 'y'}}
#pragma omp distribute parallel for simd nontemporal(x, y)
for (i = 0; i < 16; ++i)
;
// expected-error@+3 {{use of undeclared identifier 'x'}}
// expected-error@+2 {{use of undeclared identifier 'y'}}
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp distribute parallel for simd'}} expected-error@+1 {{use of undeclared identifier 'z'}}
#pragma omp distribute parallel for simd nontemporal(x, y, z)
for (i = 0; i < 16; ++i)
;
int x, y;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp distribute parallel for simd'}} expected-error@+1 {{expected ',' or ')' in 'nontemporal' clause}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd nontemporal(x :)
for (i = 0; i < 16; ++i)
;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp distribute parallel for simd'}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} expected-error@+1 {{expected ',' or ')' in 'nontemporal' clause}}
#pragma omp distribute parallel for simd nontemporal(x :, )
for (i = 0; i < 16; ++i)
;
// omp50-note@+2 {{defined as nontemporal}}
// omp45-error@+1 2 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp distribute parallel for simd'}} omp50-error@+1 {{a variable cannot appear in more than one nontemporal clause}}
#pragma omp distribute parallel for simd nontemporal(x) nontemporal(x)
for (i = 0; i < 16; ++i)
;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp distribute parallel for simd'}}
#pragma omp distribute parallel for simd private(x) nontemporal(x)
for (i = 0; i < 16; ++i)
;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp distribute parallel for simd'}}
#pragma omp distribute parallel for simd nontemporal(x) private(x)
for (i = 0; i < 16; ++i)
;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp distribute parallel for simd'}} expected-note@+1 {{to match this '('}} expected-error@+1 {{expected ',' or ')' in 'nontemporal' clause}} expected-error@+1 {{expected ')'}}
#pragma omp distribute parallel for simd nontemporal(x, y : 0)
for (i = 0; i < 16; ++i)
;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp distribute parallel for simd'}}
#pragma omp distribute parallel for simd nontemporal(x) lastprivate(x)
for (i = 0; i < 16; ++i)
;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp distribute parallel for simd'}}
#pragma omp distribute parallel for simd lastprivate(x) nontemporal(x)
for (i = 0; i < 16; ++i)
;
#pragma omp distribute parallel for simd order // omp45-error {{unexpected OpenMP clause 'order' in directive '#pragma omp distribute parallel for simd'}} expected-error {{expected '(' after 'order'}}
for (int i = 0; i < 10; ++i)
;
#pragma omp distribute parallel for simd order( // omp45-error {{unexpected OpenMP clause 'order' in directive '#pragma omp distribute parallel for simd'}} expected-error {{expected ')'}} expected-note {{to match this '('}} omp50-error {{expected 'concurrent' in OpenMP clause 'order'}}
for (int i = 0; i < 10; ++i)
;
#pragma omp distribute parallel for simd order(none // omp45-error {{unexpected OpenMP clause 'order' in directive '#pragma omp distribute parallel for simd'}} expected-error {{expected ')'}} expected-note {{to match this '('}} omp50-error {{expected 'concurrent' in OpenMP clause 'order'}}
for (int i = 0; i < 10; ++i)
;
#pragma omp distribute parallel for simd order(concurrent // omp45-error {{unexpected OpenMP clause 'order' in directive '#pragma omp distribute parallel for simd'}} expected-error {{expected ')'}} expected-note {{to match this '('}}
for (int i = 0; i < 10; ++i)
;
#pragma omp distribute parallel for simd order(concurrent) // omp45-error {{unexpected OpenMP clause 'order' in directive '#pragma omp distribute parallel for simd'}}
for (int i = 0; i < 10; ++i)
;
}
|
kmedian.h | #pragma once
#ifndef FGC_KMEDIAN_H__
#define FGC_KMEDIAN_H__
#include "minicore/optim/kmeans.h"
#include "minicore/util/csc.h"
#include <algorithm>
namespace minicore {
namespace coresets {
using namespace blz;
template<typename VT, bool TF, typename FT, typename IT>
static INLINE void __assign(blaze::DenseVector<VT, TF> &vec, IT ind, FT val) {
(*vec)[ind] = val;
}
#if 1
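// Enabled branch: build the sparse result by appending nonzeros in strictly
// increasing index order (amortized O(1) per element thanks to the capacity
// doubling below). The disabled alternative uses set(), which performs a
// search per insertion but does not require ordered indices.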
template<typename VT, bool TF, typename FT, typename IT>
static INLINE void __assign(blaze::SparseVector<VT, TF> &vec, IT ind, FT val) {
static_assert(std::is_integral_v<IT>, "Sanity1");
static_assert(std::is_arithmetic_v<FT>, "Sanity2");
auto &rr = *vec;
if(val != FT(0.)) {
if(rr.capacity() <= rr.nonZeros() + 1)
rr.reserve(std::max((rr.nonZeros() + 1) << 1, size_t(4)));
rr.append(ind, val);
}
}
#else
template<typename VT, bool TF, typename FT, typename IT>
static INLINE void __assign(blaze::SparseVector<VT, TF> &vec, IT ind, FT val) {
(*vec).set(ind, val);
}
#endif
namespace detail {
struct IndexCmp {
template<typename T>
bool operator()(const T x, const T y) const {return x->index() > y->index();}
template<typename T, typename IT>
bool operator()(const std::pair<T, IT> x, const std::pair<T, IT> y) const {
return this->operator()(x.first, y.first);
}
};
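// IndexCmp orders row iterators by descending column index, so the
// priority_queue below pops the smallest index first; IndexPQ therefore
// drives a k-way merge over the nonzeros of all rows, visiting each populated
// column exactly once in sparse_l1_unweighted_median.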
template<typename CI, typename IT=uint32_t>
struct IndexPQ: public std::priority_queue<std::pair<CI, IT>, std::vector<std::pair<CI, IT>>, IndexCmp> {
IndexPQ(size_t nelem) {
this->c.reserve(nelem);
}
auto &getc() {return this->c;}
const auto &getc() const {return this->c;}
auto getsorted() const {
auto tmp = getc();
std::fprintf(stderr, "pq size: %zu\n", tmp.size());
std::sort(tmp.begin(), tmp.end(), this->comp);
return tmp;
}
};
} // namespace detail
template<typename MT, bool SO, typename VT, bool TF>
void sparse_l1_unweighted_median(const blz::SparseMatrix<MT, SO> &data, blz::Vector<VT, TF> &ret) {
if((*data).rows() == 1) {
*ret = row(*data, 0);
return;
}
using FT = blaze::ElementType_t<MT>;
auto &ctr = *ret;
if constexpr(blaze::IsSparseVector_v<VT>) {
ctr.reset();
}
using CI = typename MT::ConstIterator;
const size_t nd = (*data).columns(), nr = (*data).rows(), hlf = nr / 2, odd = nr & 1;
detail::IndexPQ<CI, uint32_t> pq(nr);
std::unique_ptr<CI[]> ve(new CI[nr]);
for(unsigned i = 0; i < nr; ++i) {
auto r(row(*data, i));
pq.push(std::pair<CI, uint32_t>(r.begin(), i));
ve[i] = r.end();
}
assert(pq.size() == (*data).rows());
uint32_t cid = 0;
std::vector<FT> vals;
assert(pq.empty() || pq.top().first->index() == std::min_element(pq.getc().begin(), pq.getc().end(), [](auto x, auto y) {return x.first->index() < y.first->index();})->first->index());
// Setting all to 0 lets us simply skip elements with the wrong number of nonzeros.
while(pq.size()) {
//std::fprintf(stderr, "Top index: %zu\n", pq.top().first->index());
if constexpr(!blaze::IsSparseVector_v<VT>) {
while(cid < pq.top().first->index())
__assign(ctr, cid++, 0);
if(unlikely(cid > pq.top().first->index())) {
std::fprintf(stderr, "cid: %u. top index: %zu\n", cid, pq.top().first->index());
std::exit(1);
#if 0
auto pqs = pq.getsorted();
for(const auto v: pqs) std::fprintf(stderr, "%zu:%g\n", v.first->index(), v.first->value());
std::exit(1);
#endif
}
} else cid = pq.top().first->index();
while(pq.top().first->index() == cid) {
auto pair = pq.top();
pq.pop();
vals.push_back(pair.first->value());
if(++pair.first != ve[pair.second]) {
pq.push(pair);
} else if(pq.empty()) break;
}
const size_t vsz = vals.size();
FT val;
// Implicit zeros occupy the lowest ranks (this assumes nonnegative entries).
if(vsz < hlf || (odd && vsz == hlf)) {
// The median rank falls on one of the implicit zeros.
val = 0.;
} else {
shared::sort(vals.data(), vals.data() + vals.size());
if(!odd && vsz == hlf) {
// Even row count with exactly half the entries nonzero: the median
// averages the largest implicit zero with the smallest stored value.
val = vals[0] * FT(.5);
} else {
const size_t idx = vsz - hlf - 1;
val = odd ? vals[idx]: (vals[idx] + vals[idx + 1]) * FT(.5);
}
}
__assign(ctr, cid, val);
++cid;
vals.clear();
}
if constexpr(blaze::IsDenseVector_v<VT>) {
while(cid < nd) ctr[cid++] = 0;
}
}
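// Usage sketch (an assumption for illustration; blz aliases blaze within
// minicore, and the column-median logic above presumes nonnegative entries so
// that the implicit zeros sort below every stored value):
//
//   blz::CompressedMatrix<double> X(100, 50);             // 100 points, 50 features
//   blz::DynamicVector<double, blz::rowVector> med(50);   // one slot per column
//   sparse_l1_unweighted_median(X, med);                  // med[j] = median of column j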
template<typename MT, bool SO, typename VT, bool TF>
void l1_unweighted_median(const blz::Matrix<MT, SO> &data, blz::Vector<VT, TF> &ret) {
#if 0
if constexpr(blz::IsSparseMatrix_v<MT>) {
sparse_l1_unweighted_median(*data, ret);
return;
}
#endif
//std::fprintf(stderr, "%s unweighted l1 median. data shape: %zu/%zu. Return shape: %zu\n", blaze::IsDenseMatrix_v<MT> ? "Dense": "Sparse", (*data).rows(), (*data).columns(), (*ret).size());
assert((*ret).size() == (*data).columns());
auto &rr(*ret);
const auto &dr(*data);
const bool odd = dr.rows() % 2;
const size_t hlf = dr.rows() / 2;
blaze::DynamicVector<ElementType_t<MT>, blaze::columnVector> dv;
if constexpr(blaze::IsSparseVector_v<VT>) {
(*ret).reset();
}
for(size_t i = 0; i < dr.columns(); ++i) {
dv = column(dr, i);
// Should do fast copying.
shared::sort(dv.begin(), dv.end());
auto val = odd ? dv[hlf]: ElementType_t<MT>(.5) * (dv[hlf - 1] + dv[hlf]);
#if 0
std::fprintf(stderr, "val %g at %zu\n", val, i);
#endif
__assign(rr, i, val);
assert(rr[i] == val || !std::fprintf(stderr, "rr[i] %g vs %g\n", double(rr[i]), val));
}
}
template<typename MT, bool SO, typename VT, bool TF, typename Rows>
void l1_unweighted_median(const blz::Matrix<MT, SO> &_data, const Rows &rs, blz::Vector<VT, TF> &ret) {
assert((*ret).size() == (*_data).columns());
auto &rr(*ret);
const auto &dr(*_data);
const bool odd = rs.size() % 2;
const size_t hlf = rs.size() / 2;
const size_t nc = dr.columns();
blaze::DynamicMatrix<ElementType_t<MT>, SO> tmpind;
if constexpr(blaze::IsSparseVector_v<VT>) {
(*ret).reset();
}
size_t i;
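// Process the columns in panels of up to 8: each panel of the selected rows
// is transposed into tmpind so that every per-column median below sorts a
// contiguous row of the temporary matrix.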
for(i = 0; i < nc;) {
const unsigned nr = std::min(size_t(8), nc - i);
tmpind = trans(blaze::submatrix(blaze::rows(dr, rs.data(), rs.size()), 0, i, rs.size(), nr)); // column offset is i itself; i already advances by nr per panel
for(unsigned j = 0; j < nr; ++j) {
auto r(blaze::row(tmpind, j));
shared::sort(r.begin(), r.end());
__assign(rr, i + j, odd ? r[hlf]: ElementType_t<MT>(0.5) * (r[hlf - 1] + r[hlf]));
}
i += nr;
}
}
#if 0
template<typename DataT, typename IndicesT, typename IndPtrT, typename VT2, bool TF2, typename IT=uint32_t, typename WT>
static inline void l1_median(const util::CSparseMatrix<DataT, IndicesT, IndPtrT> &data, blz::Vector<VT2, TF2> &ret, const IT *indices=(const IT *)nullptr, size_t nasn=0, const WT *weights=static_cast<WT *>(nullptr)) {
const size_t nc = data.columns();
if((*ret).size() != nc) {
(*ret).resize(nc);
}
(*ret).reset();
if(unlikely((*data).columns() > ((uint64_t(1) << (sizeof(IT) * CHAR_BIT)) - 1)))
throw std::runtime_error("Use a different index type, there are more features than fit in IT");
const size_t npoints = indices ? nasn: (*data).rows();
if(!npoints) throw std::invalid_argument("Can't take the median of no points");
using FT = blaze::CommonType_t<DataT, blz::ElementType_t<VT2>, std::decay_t<blz::ElementType_t<WT>>>;
std::vector<std::vector<std::pair<DataT, IT>>> pairs(nc); // One list of pairs per column
for(auto &p: pairs) p.reserve(npoints);
#ifdef _OPENMP
int nt;
#pragma omp parallel
{
nt = omp_get_num_threads();
}
auto mutexes = std::make_unique<std::mutex[]>(nt);
OMP_PFOR
#endif
for(size_t i = 0; i < npoints; ++i) {
size_t j = 0;
const auto rowind = indices ? size_t(indices[i]): i;
const std::pair<DataT, IT> empty(0, rowind);
auto crow = row(data, rowind);
auto cbeg = crow.begin(), cend = crow.end();
for(;cbeg != cend;++cbeg, ++j) {
const auto ind = cbeg->index();
while(j < ind) {
OMP_ONLY(std::lock_guard<std::mutex> lock(mutexes[j]);)
pairs[j++].push_back(empty);
}
OMP_ONLY(std::lock_guard<std::mutex> lock(mutexes[ind]);)
pairs[ind].push_back(std::pair<DataT, IT>(cbeg->value(), rowind));
++cbeg;
}
while(j < nc) {
OMP_ONLY(std::lock_guard<std::mutex> lock(mutexes[j]);)
pairs[j++].push_back(empty);
}
}
// First, compute sorted pairs
// Then find median for each column
OMP_PFOR_DYN
for(size_t i = 0; i < nc; ++i) {
auto &cpairs = pairs[i];
shared::sort(cpairs.begin(), cpairs.end());
FT wsum = 0., maxw = -std::numeric_limits<FT>::max();
IT maxind = -0;
for(size_t j = 0; j < npoints; ++j) {
double neww = 1.;
if(weights) neww = (*weights)[cpairs[j].second];
wsum += neww;
if(neww > maxw) maxw = neww, maxind = j;
}
if(maxw > wsum * .5) {
// Return the value of the tuple with maximum weight
__assign(*ret, i, cpairs[maxind].first);
continue;
}
FT mid = wsum * .5;
auto it = std::lower_bound(cpairs.begin(), cpairs.end(), mid,
[](std::pair<DataT, IT> x, FT y)
{
return x.first < y;
});
OMP_CRITICAL {
__assign(*ret, i, it->first == mid ? FT(.5 * (it->first + it[1].first)): FT(it[1].first));
}
}
}
#endif
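// Weighted median per column: the value v minimizing sum_j w_j * |v - col[j]|.
// It is found by sorting the (value, row) pairs of each column and locating
// the point where the cumulative weight first reaches half the total weight.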
template<typename MT, bool SO, typename VT2, bool TF2, typename IT=uint32_t, typename WT>
static inline void weighted_median(const blz::Matrix<MT, SO> &data, blz::Vector<VT2, TF2> &ret, const WT &weights) {
const size_t nc = (*data).columns();
if((*ret).size() != nc) {
(*ret).resize(nc);
}
if constexpr(blaze::IsSparseVector_v<VT2>) {
(*ret).reset();
}
if(unlikely((*data).columns() > ((uint64_t(1) << (sizeof(IT) * CHAR_BIT)) - 1)))
throw std::runtime_error("Use a different index type, there are more features than fit in IT");
const size_t nr = (*data).rows();
auto pairs = std::make_unique<std::pair<ElementType_t<MT>, IT>[]>(nr);
using FT = blaze::CommonType_t<blz::ElementType_t<MT>, blz::ElementType_t<VT2>, std::decay_t<decltype(weights[0])>>;
for(size_t i = 0; i < nc; ++i) {
auto col = column(*data, i);
for(size_t j = 0; j < nr; ++j)
pairs[j] = {col[j], j};
shared::sort(pairs.get(), pairs.get() + nr);
FT wsum = 0., maxw = -std::numeric_limits<FT>::max();
IT maxind = 0;
for(size_t j = 0; j < nr; ++j) {
auto neww = weights[pairs[j].second];
wsum += neww; // accumulate the total weight; `mid` below is half of it
if(neww > maxw) maxw = neww, maxind = j;
}
if(maxw > wsum * .5) {
// Return the value of the tuple with maximum weight
__assign(*ret, i, pairs[maxind].first);
continue;
}
FT mid = wsum * .5;
auto it = std::lower_bound(pairs.get(), pairs.get() + nr, mid,
[](std::pair<ElementType_t<MT>, IT> x, FT y)
{
return x.first < y;
});
(*ret)[i] = it->first == mid ? FT(.5 * (it->first + it[1].first)): FT(it[1].first);
}
}
template<typename MT, bool SO, typename VT, bool TF, typename VT3=blz::CommonType_t<ElementType_t<MT>, ElementType_t<VT>>, typename=std::enable_if_t<std::is_arithmetic_v<VT3>>>
void l1_median(const blz::Matrix<MT, SO> &data, blz::Vector<VT, TF> &ret, const VT3 *weights=static_cast<VT3 *>(nullptr)) {
if(weights)
weighted_median(data, ret, weights);
else
l1_unweighted_median(data, ret);
}
template<typename MT, bool SO, typename VT, bool TF, typename VT3, typename=std::enable_if_t<!std::is_arithmetic_v<VT3> && !std::is_pointer_v<VT3>>>
void l1_median(const blz::Matrix<MT, SO> &data, blz::Vector<VT, TF> &ret, const VT3 &weights) {
weighted_median(data, ret, weights);
}
template<typename MT, bool SO, typename VT, bool TF, typename Rows, typename VT3=blz::CommonType_t<ElementType_t<MT>, ElementType_t<VT>>, typename=std::enable_if_t<blaze::IsRows_v<Rows>>>
void l1_median(const blz::Matrix<MT, SO> &data, blz::Vector<VT, TF> &ret, const Rows &rows, const VT3 *weights=static_cast<VT3 *>(nullptr)) {
if(weights) {
auto dr(blaze::rows(data, rows.data(), rows.size()));
const blz::CustomVector<VT3, blaze::unaligned, blaze::unpadded> cv((VT3 *)weights, (*data).rows());
blz::DynamicVector<VT3> selected_weights(blaze::elements(cv, rows.data(), rows.size()));
weighted_median(dr, ret, selected_weights.data());
} else l1_unweighted_median(data, rows, ret);
}
template<typename MT, bool SO, typename VT, bool TF, typename IT=uint64_t, typename WeightT=blz::DV<double>>
void l1_median(const blaze::Matrix<MT, SO> &data, blz::Vector<VT, TF> &ret, IT *asp, size_t nasn=0, const WeightT *weights=static_cast<WeightT *>(nullptr)) {
if(!asp) {
if(weights) {
weighted_median(data, ret, *weights);
} else {
l1_unweighted_median(data, ret);
}
} else {
if(weights) {
weighted_median(rows(*data, asp, nasn), ret, *weights);
} else {
l1_unweighted_median(rows(*data, asp, nasn), ret);
}
}
}
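// tvd_median: the total-variation-distance median is the L1 median taken
// after scaling each row by the inverse of its sum, i.e. after normalizing
// every point to a probability vector.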
template<typename MT, bool SO, typename VT, bool TF, typename IT=uint64_t, typename WeightT=blz::DV<double>, typename RSums>
void tvd_median(const blaze::Matrix<MT, SO> &data, blz::Vector<VT, TF> &ret, IT *asp, size_t nasn=0, const WeightT *weights=static_cast<WeightT *>(nullptr), const RSums &rsums=RSums()) {
return l1_median(*data % blaze::expand(1. / rsums, (*data).columns()), ret, asp, nasn, weights);
}
} // namespace coresets
} // namespace minicore
#endif /* FGC_KMEDIAN_H__ */
|
splayTable.c | /*
Copyright 2007, 2008 Daniel Zerbino (zerbino@ebi.ac.uk)
This file is part of Velvet.
Velvet is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
Velvet is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Velvet; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <time.h>
#include <sys/time.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#include "globals.h"
#include "readSet.h"
#include "splay.h"
#include "tightString.h"
#include "utility.h"
#include "kmer.h"
#include "kmerOccurenceTable.h"
#include "recycleBin.h"
typedef struct mask_st Mask;
struct mask_st {
Coordinate start;
Coordinate finish;
Mask* next;
};
static RecycleBin * maskMemory = NULL;
static Mask *allocateMask()
{
if (maskMemory == NULL)
maskMemory = newRecycleBin(sizeof(Mask), 10000);
return (Mask *) allocatePointer(maskMemory);
}
static Mask * newMask(Coordinate position)
{
Mask * mask = allocateMask();
mask->start = position;
mask->finish = position;
mask->next = NULL;
return mask;
}
#define HASH_BUCKETS_NB 16777216
#ifdef _OPENMP
#define NB_PUSH 32
#define BUFFER_SIZE 4096
static StringBuffer **annotationBuffer = NULL;
static StringBuffer **annotationBufferW = NULL;
static int *nbPush = NULL;
static boolean producing = 1;
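/* Double-buffered annotation output: each worker thread appends lines to
 * annotationBuffer[thread] and, every NB_PUSH reads, swaps it with
 * annotationBufferW[thread]; a dedicated writer thread (bufferWritter) drains
 * the W buffers to disk. Synchronization relies on `#pragma omp flush` plus
 * spinning until the writer has emptied a buffer (its first byte is '\0'). */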
static void initAnnotationBuffers(void)
{
int n;
int i;
n = omp_get_max_threads();
annotationBuffer = callocOrExit(n, StringBuffer*);
annotationBufferW = callocOrExit(n, StringBuffer*);
nbPush = callocOrExit(n, int);
for (i = 0; i < n; i++)
{
annotationBuffer[i] = newStringBuffer(BUFFER_SIZE);
annotationBufferW[i] = newStringBuffer(BUFFER_SIZE);
}
}
static void destroyAnnotationBuffers(void)
{
int n;
int i;
n = omp_get_max_threads();
for (i = 0; i < n; i++)
{
destroyStringBuffer(annotationBuffer[i], 1);
destroyStringBuffer(annotationBufferW[i], 1);
}
free(annotationBuffer);
free(annotationBufferW);
free(nbPush);
annotationBuffer = NULL;
annotationBufferW = NULL;
nbPush = NULL;
}
static void pushBufferCommit(int thread)
{
StringBuffer *tmp;
char *s;
s = annotationBufferW[thread]->str;
do
{
#pragma omp flush(s)
}
while (*s);
tmp = annotationBufferW[thread];
annotationBufferW[thread] = annotationBuffer[thread];
annotationBuffer[thread] = tmp;
tmp = annotationBufferW[thread];
#pragma omp flush(tmp)
}
static void pushBuffer(int thread)
{
if (++nbPush[thread] == NB_PUSH)
{
nbPush[thread] = 0;
pushBufferCommit(thread);
}
}
static void writeBuffers(FILE *outFile, int nbThreads)
{
int i;
for (i = 0; i < nbThreads; i++)
{
StringBuffer *b;
char *s;
b = annotationBufferW[i];
#pragma omp flush(b)
s = b->str;
#pragma omp flush(s)
if (*s)
{
velvetFprintf(outFile, "%s", annotationBufferW[i]->str);
resetStringBuffer(annotationBufferW[i]);
}
}
}
static void bufferWritter(FILE *outFile)
{
int n;
n = omp_get_max_threads();
#pragma omp flush(producing)
while (producing)
{
writeBuffers(outFile, n);
#pragma omp flush(producing)
}
writeBuffers(outFile, n);
}
static void appendLine(char *line, int thread)
{
appendStringBuffer(annotationBuffer[thread], line);
}
#else
#define BUFFER_SIZE 1024
StringBuffer *annotationBuffer = NULL;
static void appendLine(char *line, int thread)
{
appendStringBuffer(annotationBuffer, line);
}
#endif
struct splayTable_st {
SplayTree **table;
#ifdef _OPENMP
omp_lock_t *tableLocks;
#endif
KmerOccurenceTable *kmerOccurenceTable;
int WORDLENGTH;
boolean double_strand;
};
SplayTable *newSplayTable(int WORDLENGTH, boolean double_strand)
{
SplayTable *splayTable = mallocOrExit(1, SplayTable);
splayTable->WORDLENGTH = WORDLENGTH;
splayTable->table = callocOrExit(HASH_BUCKETS_NB, SplayTree *);
splayTable->kmerOccurenceTable = NULL;
splayTable->double_strand = double_strand;
#ifdef _OPENMP
splayTable->tableLocks = mallocOrExit(HASH_BUCKETS_NB, omp_lock_t);
int i;
#pragma omp parallel for
for (i = 0; i < HASH_BUCKETS_NB; i++)
omp_init_lock(splayTable->tableLocks + i);
initSplayTreeMemory();
#endif
return splayTable;
}
void destroySplayTable(SplayTable * splayTable)
{
velvetLog("Destroying splay table\n");
destroyAllSplayTrees();
free(splayTable->table);
destroyKmerOccurenceTable(splayTable->kmerOccurenceTable);
free(splayTable);
velvetLog("Splay table destroyed\n");
}
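/* Typical lifecycle (a sketch; the actual callers live elsewhere in Velvet):
 * create the table with newSplayTable(wordLength, double_strand), feed each
 * read through the insertion/annotation routines defined later in this file,
 * then release everything with destroySplayTable(). */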
static KmerKey hash_kmer(Kmer * kmer)
{
#if KMER_LONGLONGS
KmerKey key = kmer->longlongs[0];
#if KMER_LONGLONGS > 1
key ^= kmer->longlongs[1];
#endif
#if KMER_LONGLONGS > 2
key ^= kmer->longlongs[2];
#endif
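/* Mix the combined words with a 64-bit integer finalizer (in the style of
 * Thomas Wang's hash) so that similar k-mers spread across buckets. */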
key = (~key) + (key << 21);
key = key ^ (key >> 24);
key = (key + (key << 3)) + (key << 8);
key = key ^ (key >> 14);
key = (key + (key << 2)) + (key << 4);
key = key ^ (key >> 28);
key = key + (key << 31);
return key % HASH_BUCKETS_NB;
#elif KMER_LONGS
KmerKey key = kmer->longs;
key += ~(key << 15);
key ^= (key >> 10);
key += (key << 3);
key ^= (key >> 6);
key += ~(key << 11);
key ^= (key >> 16);
return key % HASH_BUCKETS_NB;
#elif KMER_INTS
return kmer->ints % HASH_BUCKETS_NB;
#elif KMER_CHARS
return kmer->chars % HASH_BUCKETS_NB;
#endif
}
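/* Scan outwards from `position` for the closest read offset that already
 * carries a reference mapping (a high-scoring segment pair anchor); returns
 * that offset, or -1 if the read has no anchor at all. */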
static Coordinate getNearestHSPIndex(Coordinate position, IDnum * sequenceIDs, Coordinate sequenceLength) {
Coordinate back_offset = -1;
Coordinate front_offset = -1;
for (back_offset = 1; position - back_offset > 0; back_offset++)
if (sequenceIDs[position - back_offset])
break;
for (front_offset = 1; position + front_offset < sequenceLength; front_offset++)
if (sequenceIDs[position + front_offset])
break;
if (back_offset == position && position + front_offset == sequenceLength)
return -1;
else if (back_offset == position)
return position + front_offset;
else if (front_offset + position == sequenceLength)
return position - back_offset;
else
return back_offset < front_offset? position - back_offset : position + front_offset;
}
static KmerOccurence * getMostAppropriateHit(Coordinate readCoord, Coordinate readLength, boolean direct, KmerOccurence * kmerOccurence, IDnum mapCount, IDnum * mapSequenceID, Coordinate * mapCoord, int wordLength) {
KmerOccurence * current;
KmerOccurence * best = NULL;
Coordinate expectedPosition;
Coordinate positionError;
IDnum mapIndex;
// If only one hit
if (!getNextKmerOccurence(kmerOccurence))
return kmerOccurence;
// If multiple hits by unmapped read
if (mapCount == 0)
return NULL;
// Compare cases
for (current = kmerOccurence; current; current = getNextKmerOccurence(current)) {
for (mapIndex = 0; mapIndex < mapCount; mapIndex++) {
// If wrong sequence or inconsistent orientation
if ((direct && getKmerOccurenceNodeID(current) != mapSequenceID[mapIndex])
|| (!direct && getKmerOccurenceNodeID(current) != -mapSequenceID[mapIndex]))
continue;
// Compute where it is supposed to land on reference
if (mapSequenceID[mapIndex] < 0)
expectedPosition = mapCoord[mapIndex] + readLength - readCoord - 1;
else
expectedPosition = mapCoord[mapIndex] + readCoord - wordLength + 1;
// Compute positional error
positionError = getKmerOccurencePosition(current) - expectedPosition;
// If potential hit record
if (positionError < 1 && positionError > -1) {
if (best)
// If competing hit, give up
return NULL;
else
// Record current hit
best = current;
}
}
}
return best;
}
static inline boolean
doFindOrInsertOccurenceInSplayTree(Kmer * kmer, IDnum * seqID,
Coordinate * position, SplayTable *table)
{
#ifdef _OPENMP
const KmerKey kmerHash = hash_kmer(kmer);
boolean ret;
omp_set_lock(table->tableLocks + kmerHash);
ret = findOrInsertOccurenceInSplayTree(kmer, seqID, position,
table->table + kmerHash);
omp_unset_lock(table->tableLocks + kmerHash);
return ret;
#else
return findOrInsertOccurenceInSplayTree(kmer, seqID, position,
&table->table[hash_kmer(kmer)]);
#endif
}
static boolean findOrInsertOccurenceInSplayTable(Kmer * kmer, IDnum * seqID,
Coordinate * position,
SplayTable * table, IDnum * sequenceIDs,
Coordinate * coords, Coordinate readIndex, Coordinate readLength, boolean direct)
{
KmerOccurence * hit;
Coordinate HSPIndex;
// Check if previous anchor
if (sequenceIDs && sequenceIDs[readIndex]) {
if (direct)
*seqID = sequenceIDs[readIndex];
else
*seqID = -sequenceIDs[readIndex];
if (sequenceIDs[readIndex] > 0)
*position = coords[readIndex] + readIndex;
else
*position = coords[readIndex] - readIndex + readLength - 1;
return true;
}
else if (coords && coords[readIndex])
// If in buffer zone:
return doFindOrInsertOccurenceInSplayTree(kmer, seqID, position, table);
// Look up first in reference sequence k-mers
if (table->kmerOccurenceTable
&& (hit = findKmerInKmerOccurenceTable(kmer, table->kmerOccurenceTable))) {
if (!getNextKmerOccurence(hit)) {
*seqID = getKmerOccurenceNodeID(hit);
*position = getKmerOccurencePosition(hit);
return true;
} else if ((HSPIndex = getNearestHSPIndex(*position, sequenceIDs, readLength)) > 0) {
hit = getMostAppropriateHit(readIndex, readLength, direct, hit, 1, &(sequenceIDs[HSPIndex]), &(coords[HSPIndex]), table->WORDLENGTH);
if (hit) {
*seqID = getKmerOccurenceNodeID(hit);
*position = getKmerOccurencePosition(hit);
return true;
}
}
}
// If not, go through the novel k-mers
return doFindOrInsertOccurenceInSplayTree(kmer, seqID, position, table);
}
static void printAnnotations(IDnum *sequenceIDs, Coordinate * coords,
TightString * array, SplayTable * table,
FILE * file, boolean second_in_pair, IDnum seqID)
{
Coordinate readNucleotideIndex = 0;
Coordinate writeNucleotideIndex = 0;
Kmer word;
Kmer antiWord;
boolean annotationClosed = true;
IDnum sequenceID;
Coordinate coord;
boolean found;
Coordinate position = 0;
Coordinate start = 0;
Coordinate finish = 0;
IDnum referenceSequenceID = 0;
Nucleotide nucleotide;
char lineBuffer[MAXLINE];
TightString * tString = getTightStringInArray(array, seqID - 1);
int thread = 0;
clearKmer(&word);
clearKmer(&antiWord);
#ifdef _OPENMP
thread = omp_get_thread_num();
#endif
sprintf(lineBuffer, "ROADMAP %li\n", (long)seqID);
appendLine(lineBuffer, thread);
// Neglect any string shorter than WORDLENGTH :
if (getLength(tString) < table->WORDLENGTH) {
#ifdef _OPENMP
pushBuffer(thread);
#else
velvetFprintf(file, "%s", annotationBuffer->str);
resetStringBuffer(annotationBuffer);
#endif
return;
}
// Fill in the initial word :
for (readNucleotideIndex = 0;
readNucleotideIndex < table->WORDLENGTH - 1;
readNucleotideIndex++) {
nucleotide = getNucleotide(readNucleotideIndex, tString);
pushNucleotide(&word, nucleotide);
#ifdef COLOR
reversePushNucleotide(&antiWord, nucleotide);
#else
reversePushNucleotide(&antiWord, 3 - nucleotide);
#endif
}
while (readNucleotideIndex < getLength(tString)) {
// Shift word:
nucleotide = getNucleotide(readNucleotideIndex, tString);
pushNucleotide(&word, nucleotide);
#ifdef COLOR
reversePushNucleotide(&antiWord, nucleotide);
#else
reversePushNucleotide(&antiWord, 3 - nucleotide);
#endif
sequenceID = seqID;
coord = writeNucleotideIndex;
if (table->double_strand) {
if (compareKmers(&word, &antiWord) <= 0) {
found =
findOrInsertOccurenceInSplayTable(&word,
&sequenceID,
&coord,
table,
sequenceIDs,
coords,
readNucleotideIndex,
getLength(tString),
true);
} else {
sequenceID = -sequenceID;
found =
findOrInsertOccurenceInSplayTable(&antiWord,
&sequenceID,
&coord,
table,
sequenceIDs,
coords,
readNucleotideIndex,
getLength(tString),
false);
sequenceID = -sequenceID;
if (sequenceID != seqID && sequenceID < 0) {
printf("*** sequenceID = %ld\n", (long) sequenceID);
exit(1);
}
}
} else {
if (!second_in_pair) {
found =
findOrInsertOccurenceInSplayTable(&word,
&sequenceID,
&coord,
table,
sequenceIDs,
coords,
readNucleotideIndex,
getLength(tString),
true);
} else {
sequenceID = -sequenceID;
found =
findOrInsertOccurenceInSplayTable(&antiWord,
&sequenceID,
&coord,
table,
sequenceIDs,
coords,
readNucleotideIndex,
getLength(tString),
false);
sequenceID = -sequenceID;
}
}
if (!found) {
writeNucleotideIndex++;
if (!annotationClosed) {
sprintf(lineBuffer, "%ld\t%lld\t%lld\t%lld\n",
(long) referenceSequenceID, (long long) position,
(long long) start, (long long) finish);
appendLine(lineBuffer, thread);
}
annotationClosed = true;
}
// Otherwise create/complete annotation:
else {
// Forbidden k-mer
if (sequenceID == 0) {
break;
}
// Closed/nonexistent annotation
else if (annotationClosed) {
referenceSequenceID = sequenceID;
position = writeNucleotideIndex;
start = finish = coord;
if (referenceSequenceID > 0)
finish++;
else
finish--;
annotationClosed = false;
}
// Open annotation
else if (sequenceID == referenceSequenceID
&& coord == finish) {
if (referenceSequenceID > 0)
finish++;
else
finish--;
}
// Previous annotation does not correspond
else {
sprintf(lineBuffer, "%ld\t%lld\t%lld\t%lld\n",
(long) referenceSequenceID, (long long) position,
(long long) start, (long long) finish);
appendLine(lineBuffer, thread);
referenceSequenceID = sequenceID;
position = writeNucleotideIndex;
start = finish = coord;
if (referenceSequenceID > 0)
finish++;
else
finish--;
}
}
readNucleotideIndex++;
}
if (!annotationClosed) {
sprintf(lineBuffer, "%ld\t%lld\t%lld\t%lld\n",
(long) referenceSequenceID, (long long) position,
(long long) start, (long long) finish);
appendLine(lineBuffer, thread);
}
#ifdef _OPENMP
pushBuffer(thread);
#else
velvetFprintf(file, "%s", annotationBuffer->str);
resetStringBuffer(annotationBuffer);
#endif
return;
}
static void computeClearHSPs(TightString * array, FILE * seqFile, boolean second_in_pair, SplayTable * table, IDnum ** sequenceIDs, Coordinate ** coords, IDnum seqID) {
Coordinate readNucleotideIndex = 0;
Kmer word;
Kmer antiWord;
Kmer polyA;
Nucleotide nucleotide;
KmerOccurence * hit;
char line[MAXLINE];
char* start;
char c;
Coordinate mapCount = 0;
Coordinate maxCount = 10;
IDnum * mapReferenceIDs = callocOrExit(maxCount, IDnum);
Coordinate * mapCoords = callocOrExit(maxCount, Coordinate);
long long_var;
long long longlong_var;
int penalty;
TightString * tString;
Coordinate length;
clearKmer(&polyA);
#ifdef _OPENMP
#pragma omp critical
{
#endif
// Get read ID:
if (!fgets(line, MAXLINE, seqFile)) {
puts("Incomplete Sequences file (computeHSPScores)");
#ifdef DEBUG
abort();
#endif
exit(1);
}
start = strchr(line, '\t');
tString = getTightStringInArray(array, seqID - 1);
length = getLength(tString);
*sequenceIDs = callocOrExit(length, IDnum);
*coords = callocOrExit(length, Coordinate);
// Parse file for mapping info
while (seqFile && (c = getc(seqFile)) != EOF) {
if (c == '>')
break;
fgets(line, MAXLINE, seqFile);
if (c == 'M') {
sscanf(line,"\t%li\t%lli\n", &long_var, &longlong_var);
mapReferenceIDs[mapCount] = (IDnum) long_var;
mapCoords[mapCount] = (Coordinate) longlong_var;
if (++mapCount == maxCount) {
maxCount *= 2;
mapReferenceIDs = reallocOrExit(mapReferenceIDs, maxCount, IDnum);
mapCoords = reallocOrExit(mapCoords, maxCount, Coordinate);
}
}
}
#ifdef _OPENMP
}
#endif
// First pass for unambiguous hits
// Fill in the initial word :
clearKmer(&word);
clearKmer(&antiWord);
for (readNucleotideIndex = 0;
readNucleotideIndex < table->WORDLENGTH - 1;
readNucleotideIndex++) {
nucleotide = getNucleotide(readNucleotideIndex, tString);
pushNucleotide(&word, nucleotide);
#ifdef COLOR
reversePushNucleotide(&antiWord, nucleotide);
#else
reversePushNucleotide(&antiWord, 3 - nucleotide);
#endif
}
// Kill silly poly-T beginnings
while (readNucleotideIndex < getLength(tString) && (compareKmers(&antiWord, &polyA) == 0 || compareKmers(&word, &polyA) == 0)) {
nucleotide = getNucleotide(readNucleotideIndex++, tString);
pushNucleotide(&word, nucleotide);
#ifdef COLOR
reversePushNucleotide(&antiWord, nucleotide);
#else
reversePushNucleotide(&antiWord, 3 - nucleotide);
#endif
}
while (readNucleotideIndex < getLength(tString)) {
// Shift word:
nucleotide = getNucleotide(readNucleotideIndex, tString);
pushNucleotide(&word, nucleotide);
#ifdef COLOR
reversePushNucleotide(&antiWord, nucleotide);
#else
reversePushNucleotide(&antiWord, 3 - nucleotide);
#endif
if (table->double_strand) {
if (compareKmers(&word, &antiWord) <= 0) {
hit = findKmerInKmerOccurenceTable(&word, table->kmerOccurenceTable);
if (hit && (hit = getMostAppropriateHit(readNucleotideIndex, getLength(tString), true, hit, mapCount, mapReferenceIDs, mapCoords, table->WORDLENGTH)))
(*sequenceIDs)[readNucleotideIndex] = getKmerOccurenceNodeID(hit);
} else {
hit = findKmerInKmerOccurenceTable(&antiWord, table->kmerOccurenceTable);
if (hit && (hit = getMostAppropriateHit(readNucleotideIndex, getLength(tString), false, hit, mapCount, mapReferenceIDs, mapCoords, table->WORDLENGTH)))
(*sequenceIDs)[readNucleotideIndex] = -getKmerOccurenceNodeID(hit);
}
} else {
if (!second_in_pair) {
hit = findKmerInKmerOccurenceTable(&word, table->kmerOccurenceTable);
if (hit && (hit = getMostAppropriateHit(readNucleotideIndex, getLength(tString), true, hit, mapCount, mapReferenceIDs, mapCoords, table->WORDLENGTH)))
(*sequenceIDs)[readNucleotideIndex] = getKmerOccurenceNodeID(hit);
} else {
hit = findKmerInKmerOccurenceTable(&antiWord, table->kmerOccurenceTable);
if (hit && (hit = getMostAppropriateHit(readNucleotideIndex, getLength(tString), false, hit, mapCount, mapReferenceIDs, mapCoords, table->WORDLENGTH)))
(*sequenceIDs)[readNucleotideIndex] = -getKmerOccurenceNodeID(hit);
}
}
if ((*sequenceIDs)[readNucleotideIndex]) {
if ((*sequenceIDs)[readNucleotideIndex] > 0)
(*coords)[readNucleotideIndex] = getKmerOccurencePosition(hit) - readNucleotideIndex;
else
(*coords)[readNucleotideIndex] = getKmerOccurencePosition(hit) + readNucleotideIndex - getLength(tString) + 1;
}
// Barrier to flip-flopping
if ((*sequenceIDs)[readNucleotideIndex - 1] != 0
&& ((*sequenceIDs)[readNucleotideIndex] != (*sequenceIDs)[readNucleotideIndex - 1]
|| (*coords)[readNucleotideIndex] != (*coords)[readNucleotideIndex - 1])) {
// Break in continuity... skip k positions
(*sequenceIDs)[readNucleotideIndex] = 0;
(*coords)[readNucleotideIndex] = -1;
readNucleotideIndex++;
for (penalty = 0; penalty < table->WORDLENGTH - 1 && readNucleotideIndex < getLength(tString); penalty++) {
nucleotide = getNucleotide(readNucleotideIndex, tString);
pushNucleotide(&word, nucleotide);
#ifdef COLOR
reversePushNucleotide(&antiWord, nucleotide);
#else
reversePushNucleotide(&antiWord, 3 - nucleotide);
#endif
(*sequenceIDs)[readNucleotideIndex] = 0;
(*coords)[readNucleotideIndex] = -1;
readNucleotideIndex++;
}
} else
readNucleotideIndex++;
}
free(mapReferenceIDs);
free(mapCoords);
}
void inputSequenceIntoSplayTable(TightString * array,
SplayTable * table,
FILE * file, FILE * seqFile,
boolean second_in_pair,
IDnum seqID)
{
IDnum * sequenceIDs = NULL;
Coordinate * coords = NULL;
// If appropriate, get the HSPs on reference sequences
if (table->kmerOccurenceTable)
computeClearHSPs(array, seqFile, second_in_pair, table, &sequenceIDs, &coords, seqID);
// Go through read, eventually with annotations
printAnnotations(sequenceIDs, coords, array, table, file, second_in_pair, seqID);
// Clean up
if (sequenceIDs) {
free(sequenceIDs);
free(coords);
}
}
void inputReferenceIntoSplayTable(TightString * tString,
SplayTable * table, FILE * file, IDnum seqID, Mask * mask)
{
IDnum currentIndex;
Coordinate readNucleotideIndex = 0;
Coordinate kmerIndex = 0;
Kmer word;
Kmer antiWord;
Nucleotide nucleotide;
Mask * currentMask = mask;
#ifdef _OPENMP
char lineBuffer[MAXLINE];
#endif
clearKmer(&word);
clearKmer(&antiWord);
currentIndex = seqID;
#ifdef _OPENMP
sprintf(lineBuffer, "ROADMAP %li\n", (long)currentIndex);
appendLine(lineBuffer, omp_get_thread_num());
#else
velvetFprintf(file, "ROADMAP %li\n", (long)currentIndex);
#endif
// Neglect any string shorter than WORDLENGTH :
if (getLength(tString) < table->WORDLENGTH) {
return;
}
// Fill in the initial word :
for (readNucleotideIndex = 0;
readNucleotideIndex < table->WORDLENGTH - 1;
readNucleotideIndex++) {
nucleotide = getNucleotide(readNucleotideIndex, tString);
pushNucleotide(&word, nucleotide);
if (table->double_strand) {
#ifdef COLOR
reversePushNucleotide(&antiWord, nucleotide);
#else
reversePushNucleotide(&antiWord, 3 - nucleotide);
#endif
}
}
while (readNucleotideIndex < getLength(tString)) {
// Shift word:
nucleotide = getNucleotide(readNucleotideIndex, tString);
pushNucleotide(&word, nucleotide);
if (table->double_strand) {
#ifdef COLOR
reversePushNucleotide(&antiWord, nucleotide);
#else
reversePushNucleotide(&antiWord, 3 - nucleotide);
#endif
}
// Check for gap masks:
if (currentMask && currentMask->start - table->WORDLENGTH + 1 <= readNucleotideIndex) {
while(currentMask && currentMask->finish + table->WORDLENGTH - 1 < readNucleotideIndex)
currentMask = currentMask->next;
if (currentMask && currentMask->finish + table->WORDLENGTH - 1 >= readNucleotideIndex) {
readNucleotideIndex++;
kmerIndex++;
continue;
}
}
// Record k-mer
if (table->double_strand) {
if (compareKmers(&word, &antiWord) <= 0)
recordKmerOccurence(&word, currentIndex,
kmerIndex,
table->kmerOccurenceTable);
else
recordKmerOccurence(&antiWord, -currentIndex,
kmerIndex,
table->kmerOccurenceTable);
} else {
recordKmerOccurence(&word, currentIndex,
kmerIndex,
table->kmerOccurenceTable);
}
readNucleotideIndex++;
kmerIndex++;
}
return;
}
static Coordinate countReferenceKmers(ReadSet * reads, int wordLength) {
IDnum readIndex;
Coordinate length = 0;
for (readIndex = 0; readIndex < reads->readCount && reads->categories[readIndex] == REFERENCE; readIndex++)
{
Coordinate tmpLength = getLength(getTightStringInArray(reads->tSequences, readIndex));
if (tmpLength >= wordLength)
length += tmpLength - wordLength + 1;
}
return length;
}
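/* scanReferenceSequences records, for each reference sequence in the
 * Sequences file, a linked list of Mask intervals covering runs of unknown
 * bases ('N'/'n'); inputReferenceIntoSplayTable later skips k-mers that
 * overlap these masked gaps. */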
Mask ** scanReferenceSequences(FILE * file, IDnum referenceSequenceCount) {
Mask ** referenceMasks = callocOrExit(referenceSequenceCount, Mask*);
IDnum index;
char line[MAXLINE];
char c;
// Search sequences for masks
for (index = 0; index < referenceSequenceCount; index++) {
Mask * current = NULL;
Coordinate position = 0;
boolean openMask = false;
// Read through header
fgets(line, MAXLINE, file);
// Read through sequence
while ((c = getc(file))) {
if (c == EOF || c == '>')
break;
else if (c == '\r' || c == '\n')
continue;
else if (c == 'n' || c == 'N') {
if (openMask)
current->finish++;
else if (referenceMasks[index] == NULL) {
referenceMasks[index] = newMask(position);
current = referenceMasks[index];
} else {
current->next = newMask(position);
current = current->next;
}
openMask = true;
position++;
} else {
openMask = false;
position++;
}
}
}
return referenceMasks;
}
void inputSequenceArrayIntoSplayTableAndArchive(ReadSet * reads,
SplayTable * table,
char *filename, char* seqFilename)
{
IDnum index;
IDnum sequenceCount = reads->readCount;
TightString *array;
FILE *outfile = fopen(filename, "w");
FILE *seqFile = NULL;
IDnum kmerCount;
IDnum referenceSequenceCount = 0;
struct timeval start, end, diff;
// DEBUG
Mask ** referenceMasks;
if (outfile == NULL)
exitErrorf(EXIT_FAILURE, true, "Couldn't write to file %s", filename);
else
velvetLog("Writing into roadmap file %s...\n", filename);
// Count reference sequences
for (index = 0; index < reads->readCount && reads->categories[index] == REFERENCE; index++)
referenceSequenceCount++;
velvetFprintf(outfile, "%ld\t%ld\t%i\t%hi\n", (long) sequenceCount, (long) referenceSequenceCount, table->WORDLENGTH, (short) table->double_strand);
if (reads->tSequences == NULL)
convertSequences(reads);
gettimeofday(&start, NULL);
array = reads->tSequences;
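	/* Under OpenMP the roadmap is written with a producer/consumer scheme:
	 * one parallel section runs bufferWritter() to drain the per-thread
	 * annotation buffers into the output file, while the other section
	 * hashes the sequences with a nested parallel for; the shared flag
	 * `producing` tells the writer when all producers have committed. */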
#ifdef _OPENMP
if (omp_get_max_threads() == 1)
{
omp_set_num_threads(2);
omp_set_nested(0);
}
else
omp_set_nested(1);
initAnnotationBuffers();
#else
annotationBuffer = newStringBuffer(BUFFER_SIZE);
#endif
if (referenceSequenceCount && (kmerCount = countReferenceKmers(reads, table->WORDLENGTH)) > 0) {
		table->kmerOccurenceTable = newKmerOccurenceTable(24, table->WORDLENGTH);
allocateKmerOccurences(kmerCount, table->kmerOccurenceTable);
seqFile = fopen(seqFilename, "r");
if (seqFile == NULL)
			exitErrorf(EXIT_FAILURE, true, "Couldn't read file %s", seqFilename);
else
velvetLog("Reading mapping info from file %s\n", seqFilename);
// Skip through reference headers quickly
referenceMasks = scanReferenceSequences(seqFile, referenceSequenceCount);
#ifdef _OPENMP
producing = 1;
#pragma omp parallel sections
{
#pragma omp section
{
bufferWritter(outfile);
}
#pragma omp section
{
#pragma omp parallel for
#endif
for (index = 0; index < referenceSequenceCount; index++)
inputReferenceIntoSplayTable(getTightStringInArray(array, index),
table, outfile, index + 1, referenceMasks[index]);
#ifdef _OPENMP
for (index = omp_get_max_threads() - 1; index >= 0; index--)
pushBufferCommit(index);
producing = 0;
#pragma omp flush(producing)
}
}
#endif
if (maskMemory)
destroyRecycleBin(maskMemory);
sortKmerOccurenceTable(table->kmerOccurenceTable);
}
velvetLog("Inputting sequences...\n");
#ifdef _OPENMP
producing = 1;
#pragma omp parallel sections
{
#pragma omp section
{
bufferWritter(outfile);
}
#pragma omp section
{
#pragma omp parallel for
#endif
for (index = referenceSequenceCount; index < sequenceCount; index++)
{
boolean second_in_pair;
// Progress report on screen
if (index % 1000000 == 0) {
velvetLog("Inputting sequence %li / %li\n",
(long)index, (long)sequenceCount);
fflush(stdout);
}
// Test to make sure that all the reference reads are before all the other reads
if (reads->categories[index] == REFERENCE) {
velvetLog("Reference sequence placed after a non-reference read!\n");
velvetLog(">> Please re-order the filenames in your command line so as "
"to have the reference sequence files before all the others\n");
#ifdef DEBUG
abort();
#endif
			exit(1);
}
second_in_pair = reads->categories[index] % 2 && isSecondInPair(reads, index);
// Hashing the reads
inputSequenceIntoSplayTable(array, table,
outfile, seqFile,
second_in_pair, index + 1);
}
#ifdef _OPENMP
for (index = omp_get_max_threads() - 1; index >= 0; index--)
pushBufferCommit(index);
producing = 0;
#pragma omp flush(producing)
}
}
destroyAnnotationBuffers();
#else
destroyStringBuffer(annotationBuffer, 1);
#endif
gettimeofday(&end, NULL);
timersub(&end, &start, &diff);
velvetLog(" === Sequences loaded in %ld.%06ld s\n", diff.tv_sec, diff.tv_usec);
fclose(outfile);
if (seqFile)
fclose(seqFile);
//free(reads->tSequences);
//reads->tSequences = NULL;
//destroyReadSet(reads);
velvetLog("Done inputting sequences\n");
}
|
for-7.c | /* { dg-do compile } */
/* { dg-options "-fopenmp -fdump-tree-ompexp" } */
extern void bar(int);
void foo (int n)
{
int i;
#pragma omp for schedule(static) ordered
for (i = 0; i < n; ++i)
bar(i);
}
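/* schedule(static) combined with the ordered clause forces the loop to be
   lowered to GOMP_loop_ordered_static_start/next library calls, which the
   scans below verify. */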
/* { dg-final { scan-tree-dump-times "GOMP_loop_ordered_static_start" 1 "ompexp" } } */
/* { dg-final { scan-tree-dump-times "GOMP_loop_ordered_static_next" 1 "ompexp" } } */
|
sink-4.c | /* { dg-do compile } */
/* { dg-options "-fopenmp -fdump-tree-omplower" } */
/* Test that we adjust pointer offsets for sink variables
correctly. */
typedef struct {
char stuff[400];
} foo;
void
funk (foo *begin, foo *end)
{
foo *p;
#pragma omp parallel for ordered(1)
for (p=end; p > begin; p--)
{
#pragma omp ordered depend(sink:p+1)
void bar ();
bar();
#pragma omp ordered depend(source)
}
}
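/* sizeof(foo) == 400, so the element offset in depend(sink:p+1) must be
   scaled to the byte offset p+400 during lowering; the scan below checks
   exactly that. */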
/* { dg-final { scan-tree-dump-times "depend\\(sink:p\\+400\\)" 1 "omplower" } } */
|
mrpt.h | #ifndef CPP_MRPT_H_
#define CPP_MRPT_H_
#include <algorithm>
#include <cmath>
#include <functional>
#include <map>
#include <numeric>
#include <random>
#include <set>
#include <stdexcept>
#include <string>
#include <utility>
#include <vector>
#include <Eigen/Dense>
#include <Eigen/SparseCore>
struct Mrpt_Parameters {
int n_trees = 0; /**< Number of trees in the index. */
int depth = 0; /**< Depth of the trees in the index. */
int k = 0; /**< Number of nearest neighbors searched for (if the index is autotuned; otherwise 0). */
int votes = 0; /**< Optimal vote threshold (if the index is autotuned and the target recall is set; otherwise 0). */
double estimated_qtime = 0.0; /**< Estimated query time (if the index is autotuned and the target recall is set; otherwise 0.0). */
double estimated_recall = 0.0; /**< Estimated recall (if the index is autotuned and the target recall is set; otherwise 0.0). */
};
class Mrpt {
public:
/** @name Constructors
* The constructor does not actually build the index. The building is done
* by the function grow() which has to be called before queries can be made.
* There are two different versions of the constructor which differ only
* by the type of the input data. The first version takes the data set
* as `Ref` to `MatrixXf`, which means that the argument
* can be either `MatrixXf` or `Map<MatrixXf>` (also certain blocks of `MatrixXf`
* may be accepted, see [Eigen::Ref](https://eigen.tuxfamily.org/dox/TopicFunctionTakingEigenTypes.html)
* for more information). The second version takes a float
* pointer to an array containing the data set, and the dimension and
* the sample size of the data. There are also corresponding versions
* of all the member functions which take input data. In all cases the data
* is assumed to be stored in column-major order such that each data point
* is stored contiguously in memory. In all cases no copies are made of
* the original data matrix. */
/**
* @param X_ Eigen ref to the data set, stored as one data point per column
*/
Mrpt(const Eigen::Ref<const Eigen::MatrixXf> &X_) :
X(Eigen::Map<const Eigen::MatrixXf>(X_.data(), X_.rows(), X_.cols())),
n_samples(X_.cols()),
dim(X_.rows()) {}
/**
* @param X_ a float array containing the data set with each data point
* stored contiguously in memory
* @param dim_ dimension of the data
* @param n_samples_ number of data points
*/
Mrpt(const float *X_, int dim_, int n_samples_) :
X(Eigen::Map<const Eigen::MatrixXf>(X_, dim_, n_samples_)),
n_samples(n_samples_),
dim(dim_) {}
/**@}*/
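  /* A minimal usage sketch (the names and sizes below are illustrative, not
   * part of the API):
   *
   *   int dim = 128, n = 10000;
   *   std::vector<float> data(dim * n);  // column-major: one point per column
   *   Mrpt index(data.data(), dim, n);   // no copy of the data is made
   *   index.grow(10, 7);                 // grow 10 trees of depth 7
   */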
/** @name Normal index building.
* Build a normal (not autotuned) index.
*/
/**
* Build a normal index.
*
* @param n_trees_ number of trees to be grown
* @param depth_ depth of the trees; in the set
* \f$\{1,2, \dots ,\lfloor \log_2 (n) \rfloor \}\f$, where \f$n \f$ is the number
* of data points
* @param density_ expected proportion of non-zero components in the
* random vectors; on the interval \f$(0,1]\f$; default value sets density to
* \f$ 1 / \sqrt{d} \f$, where \f$d\f$ is the dimension of the data
* @param seed seed given to a rng when generating random vectors;
* a default value 0 initializes the rng randomly with std::random_device
*/
void grow(int n_trees_, int depth_, float density_ = -1.0, int seed = 0) {
if (!empty()) {
throw std::logic_error("The index has already been grown.");
}
if (n_trees_ <= 0) {
throw std::out_of_range("The number of trees must be positive.");
}
if (depth_ <= 0 || depth_ > std::log2(n_samples)) {
throw std::out_of_range("The depth must belong to the set {1, ... , log2(n)}.");
}
if (density_ < -1.0001 || density_ > 1.0001 || (density_ > -0.9999 && density_ < -0.0001)) {
throw std::out_of_range("The density must be on the interval (0,1].");
}
n_trees = n_trees_;
depth = depth_;
n_pool = n_trees_ * depth_;
n_array = 1 << (depth_ + 1);
if (density_ < 0) {
density = 1.0 / std::sqrt(dim);
} else {
density = density_;
}
    if (density < 1)
      build_sparse_random_matrix(sparse_random_matrix, n_pool, dim, density, seed);
    else
      build_dense_random_matrix(dense_random_matrix, n_pool, dim, seed);
split_points = Eigen::MatrixXf(n_array, n_trees);
tree_leaves = std::vector<std::vector<int>>(n_trees);
count_first_leaf_indices_all(leaf_first_indices_all, n_samples, depth);
leaf_first_indices = leaf_first_indices_all[depth];
#pragma omp parallel for
for (int n_tree = 0; n_tree < n_trees; ++n_tree) {
Eigen::MatrixXf tree_projections;
if (density < 1)
tree_projections.noalias() = sparse_random_matrix.middleRows(n_tree * depth, depth) * X;
else
tree_projections.noalias() = dense_random_matrix.middleRows(n_tree * depth, depth) * X;
tree_leaves[n_tree] = std::vector<int>(n_samples);
std::vector<int> &indices = tree_leaves[n_tree];
std::iota(indices.begin(), indices.end(), 0);
grow_subtree(indices.begin(), indices.end(), 0, 0, n_tree, tree_projections);
}
}
/**@}*/
/** @name Autotuned index building
* Builds an index by autotuning such that the parameters giving the fastest
* query time at the target recall level are found. If the target recall level
* is not reached at all, then an index giving the highest recall level
* is built. The parameters() function can be used to retrieve these optimal
* parameter values and the estimated query time and the estimated recall.
* There is a version which uses a separate set of test queries (`grow`),
* and a version which samples a test set from the data set (`grow_autotune`).
*/
/**
* Build an autotuned index.
*
* @param target_recall target recall level; on the range [0,1]
   * @param Q Eigen ref to the test queries (col = data point, row = dimension).
* @param k_ number of nearest neighbors searched for
* @param trees_max number of trees grown; default value -1 sets this to
* \f$ \mathrm{min}(\sqrt{n}, 1000)\f$, where \f$n\f$ is the number of data points.
* @param depth_max maximum depth of trees considered when searching for
* optimal parameters; in the set
* \f$\{1,2, \dots ,\lfloor \log_2 (n) \rfloor \}\f$, where \f$n \f$
* is the number of data points; default value -1 sets this to
* \f$ \log_2(n) - 4 \f$, where \f$n\f$ is the number of data points
* @param depth_min_ minimum depth of trees considered when searching for
* optimal parameters; in the set
* \f$\{1,2, \dots ,\lfloor \log_2 (n) \rfloor \}\f$; a default value -1
* sets this to \f$ \mathrm{max}(\lfloor \log_2 (n) \rfloor - 11, 5)\f$
* @param votes_max_ maximum number of votes considered when searching for
* optimal parameters; a default value -1 sets this to
* \f$ \mathrm{max}(\lfloor \mathrm{trees\_max} / 10 \rfloor,
* \mathrm{min}(10, \mathrm{trees\_max})) \f$
* @param density expected proportion of non-zero components in the random vectors;
* default value -1.0 sets this to \f$ 1 / \sqrt{d} \f$, where \f$ d\f$ is
* the dimension of data
* @param seed seed given to a rng when generating random vectors;
* a default value 0 initializes the rng randomly with std::random_device
*/
void grow(double target_recall, const Eigen::Ref<const Eigen::MatrixXf> &Q, int k_, int trees_max = -1,
int depth_max = -1, int depth_min_ = -1, int votes_max_ = -1,
float density = -1.0, int seed = 0) {
if (target_recall < 0.0 - epsilon || target_recall > 1.0 + epsilon) {
throw std::out_of_range("Target recall must be on the interval [0,1].");
}
grow(Q, k_, trees_max, depth_max, depth_min_, votes_max_, density, seed);
prune(target_recall);
}
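  /* A sketch of autotuned building at a preset recall level, assuming a
   * separate test query matrix Q (Eigen::MatrixXf, one query per column,
   * same dimension as the data):
   *
   *   Mrpt index(X);
   *   index.grow(0.9, Q, 10);                    // target recall 0.9, k = 10
   *   Mrpt_Parameters par = index.parameters();  // chosen n_trees/depth/votes
   */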
/** Build an autotuned index.
*
* @param target_recall target recall level; on the range [0,1]
* @param Q float array containing the test queries
* @param n_test number of test queries
* @param k_ number of nearest neighbors searched for
* @param trees_max number of trees grown; default value -1 sets this to
* \f$ \mathrm{min}(\sqrt{n}, 1000)\f$, where \f$n\f$ is the number of data points.
* @param depth_max maximum depth of trees considered when searching for
* optimal parameters; in the set
* \f$\{1,2, \dots ,\lfloor \log_2 (n) \rfloor \}\f$, where \f$n \f$
* is the number of data points; default value -1 sets this to
* \f$ \log_2(n) - 4 \f$, where \f$n\f$ is the number of data points
* @param depth_min_ minimum depth of trees considered when searching for
* optimal parameters; in the set
* \f$\{1,2, \dots ,\lfloor \log_2 (n) \rfloor \}\f$; a default value -1
* sets this to \f$ \mathrm{max}(\lfloor \log_2 (n) \rfloor - 11, 5)\f$
* @param votes_max_ maximum number of votes considered when searching for
* optimal parameters; a default value -1 sets this to
* \f$ \mathrm{max}(\lfloor \mathrm{trees\_max} / 10 \rfloor,
* \mathrm{min}(10, \mathrm{trees\_max})) \f$
* @param density expected proportion of non-zero components in the random vectors;
* default value -1.0 sets this to \f$ 1 / \sqrt{d} \f$, where \f$ d\f$ is
* the dimension of data
* @param seed seed given to a rng when generating random vectors;
* a default value 0 initializes the rng randomly with std::random_device
* @param indices_test parameter used by the version which uses no
* separate test set, leave empty.
*/
void grow(double target_recall, const float *Q, int n_test, int k_, int trees_max = -1,
int depth_max = -1, int depth_min_ = -1, int votes_max_ = -1,
float density = -1.0, int seed = 0, const std::vector<int> &indices_test = {}) {
if (target_recall < 0.0 - epsilon || target_recall > 1.0 + epsilon) {
throw std::out_of_range("Target recall must be on the interval [0,1].");
}
grow(Q, n_test, k_, trees_max, depth_max, depth_min_, votes_max_, density, seed, indices_test);
prune(target_recall);
}
/** Build an autotuned index sampling test queries from the training set.
*
* @param target_recall target recall level; on the range [0,1]
* @param k_ number of nearest neighbors searched for
* @param trees_max number of trees grown; default value -1 sets this to
* \f$ \mathrm{min}(\sqrt{n}, 1000)\f$, where \f$n\f$ is the number of data points.
* @param depth_max maximum depth of trees considered when searching for
* optimal parameters; in the set
* \f$\{1,2, \dots ,\lfloor \log_2 (n) \rfloor \}\f$, where \f$n \f$
* is the number of data points; default value -1 sets this to
* \f$ \log_2(n) - 4 \f$, where \f$n\f$ is the number of data points
* @param depth_min_ minimum depth of trees considered when searching for
* optimal parameters; in the set
* \f$\{1,2, \dots ,\lfloor \log_2 (n) \rfloor \}\f$; a default value -1
* sets this to \f$ \mathrm{max}(\lfloor \log_2 (n) \rfloor - 11, 5)\f$
* @param votes_max_ maximum number of votes considered when searching for
* optimal parameters; a default value -1 sets this to
* \f$ \mathrm{max}(\lfloor \mathrm{trees\_max} / 10 \rfloor,
* \mathrm{min}(10, \mathrm{trees\_max})) \f$
* @param density_ expected proportion of non-zero components in the random vectors;
* default value -1.0 sets this to \f$ 1 / \sqrt{d} \f$, where \f$ d\f$ is
* the dimension of data
* @param seed seed given to a rng when generating random vectors;
* a default value 0 initializes the rng randomly with std::random_device
* @param n_test number of test queries sampled from the training set.
*/
void grow_autotune(double target_recall, int k_, int trees_max = -1, int depth_max = -1, int depth_min_ = -1,
int votes_max_ = -1, float density_ = -1.0, int seed = 0, int n_test = 100) {
if (n_test < 1) {
throw std::out_of_range("Test set size must be > 0.");
}
n_test = n_test > n_samples ? n_samples : n_test;
std::vector<int> indices_test(sample_indices(n_test, seed));
const Eigen::MatrixXf Q(subset(indices_test));
grow(target_recall, Q.data(), Q.cols(), k_, trees_max,
depth_max, depth_min_, votes_max_, density_, seed, indices_test);
}
/**
* Get the optimal parameters and the estimated recall and query time found
* by autotuning. If the index is autotuned without preset recall level,
* `estimated_recall`, `estimated_qtime` and `votes` are set to their
* default value 0, and `n_trees` and `depth` are set to `trees_max` and
   * `depth_max`, respectively. If the index is not autotuned,
* `estimated_recall`, `estimated_qtime`, `votes` and `k` are all set to
* their default value 0.
*
* @return parameters of the index
*/
Mrpt_Parameters parameters() const {
if (index_type == normal || index_type == autotuned_unpruned) {
Mrpt_Parameters p;
p.n_trees = n_trees;
p.depth = depth;
p.k = par.k;
return p;
}
return par;
}
/**
* Get whether the index has been autotuned.
*
* @return true if the index has been autotuned, false otherwise.
*/
bool is_autotuned() const {
return index_type == autotuned;
}
/**@}*/
/** @name Autotuned index building without preset recall level
* Build an autotuned index. This version does not require prespecifying
* a target recall level, but an index generated by this function can be used
* to subset different indices with different recall levels. This is done by
* subset(). The function optimal_parameters() can be used to retrieve a
* pareto frontier of optimal parameters. There is a version which uses a
* separate set of test queries (`grow`), and a version which samples a
* test set from the data set (`grow_autotune`).
*/
/**@{*/
/** Build an autotuned index without prespecifying a recall level.
*
* @param data a float array containing the test queries.
* @param n_test number of test queries
* @param k_ number of nearest neighbors searched for
* @param trees_max number of trees grown; default value -1 sets this to
* \f$ \mathrm{min}(\sqrt{n}, 1000)\f$, where \f$n\f$ is the number of data points.
* @param depth_max maximum depth of trees considered when searching for
* optimal parameters; in the set
* \f$\{1,2, \dots ,\lfloor \log_2 (n) \rfloor \}\f$, where \f$n \f$
* is the number of data points; default value -1 sets this to
* \f$ \log_2(n) - 4 \f$, where \f$n\f$ is the number of data points
* @param depth_min_ minimum depth of trees considered when searching for
* optimal parameters; in the set
* \f$\{1,2, \dots ,\lfloor \log_2 (n) \rfloor \}\f$; a default value -1
* sets this to \f$ \mathrm{max}(\lfloor \log_2 (n) \rfloor - 11, 5)\f$
* @param votes_max_ maximum number of votes considered when searching for
* optimal parameters; a default value -1 sets this to
* \f$ \mathrm{max}(\lfloor \mathrm{trees\_max} / 10 \rfloor,
* \mathrm{min}(10, \mathrm{trees\_max})) \f$
* @param density_ expected proportion of non-zero components in the random vectors;
* default value -1.0 sets this to \f$ 1 / \sqrt{d} \f$, where \f$ d\f$ is
* the dimension of data
* @param seed seed given to a rng when generating random vectors;
* a default value 0 initializes the rng randomly with std::random_device
* @param indices_test parameter used by the version which uses no
* separate test set, leave empty.
**/
void grow(const float *data, int n_test, int k_, int trees_max = -1, int depth_max = -1,
int depth_min_ = -1, int votes_max_ = -1, float density_ = -1.0, int seed = 0,
const std::vector<int> &indices_test = {}) {
    if (trees_max == -1) {
trees_max = std::min(std::sqrt(n_samples), 1000.0);
}
if (depth_min_ == -1) {
depth_min_ = std::max(static_cast<int>(std::log2(n_samples) - 11), 5);
}
if (depth_max == -1) {
depth_max = std::max(static_cast<int>(std::log2(n_samples) - 4), depth_min_);
}
if (votes_max_ == -1) {
votes_max_ = std::max(trees_max / 10, std::min(trees_max, 10));
}
if (density_ > -1.0001 && density_ < -0.9999) {
density_ = 1.0 / std::sqrt(dim);
}
if (!empty()) {
throw std::logic_error("The index has already been grown.");
}
if (k_ <= 0 || k_ > n_samples) {
throw std::out_of_range("k_ must belong to the set {1, ..., n}.");
}
if (trees_max <= 0) {
throw std::out_of_range("trees_max must be positive.");
}
if (depth_max <= 0 || depth_max > std::log2(n_samples)) {
throw std::out_of_range("depth_max must belong to the set {1, ... , log2(n)}.");
}
if (depth_min_ <= 0 || depth_min_ > depth_max) {
throw std::out_of_range("depth_min_ must belong to the set {1, ... , depth_max}");
}
if (votes_max_ <= 0 || votes_max_ > trees_max) {
throw std::out_of_range("votes_max_ must belong to the set {1, ... , trees_max}.");
}
if (density_ < 0.0 || density_ > 1.0001) {
throw std::out_of_range("The density must be on the interval (0,1].");
}
    if (n_samples < 101) {
throw std::out_of_range("Sample size must be at least 101 to autotune an index.");
}
depth_min = depth_min_;
votes_max = votes_max_;
k = k_;
const Eigen::Map<const Eigen::MatrixXf> Q(data, dim, n_test);
grow(trees_max, depth_max, density_, seed);
Eigen::MatrixXi exact(k, n_test);
compute_exact(Q, exact, indices_test);
std::vector<Eigen::MatrixXd> recalls(depth_max - depth_min + 1);
cs_sizes = std::vector<Eigen::MatrixXd>(depth_max - depth_min + 1);
for (int d = depth_min; d <= depth_max; ++d) {
recalls[d - depth_min] = Eigen::MatrixXd::Zero(votes_max, trees_max);
cs_sizes[d - depth_min] = Eigen::MatrixXd::Zero(votes_max, trees_max);
}
for (int i = 0; i < n_test; ++i) {
std::vector<Eigen::MatrixXd> recall_tmp(depth_max - depth_min + 1);
std::vector<Eigen::MatrixXd> cs_size_tmp(depth_max - depth_min + 1);
count_elected(Q.col(i), Eigen::Map<Eigen::VectorXi>(exact.data() + i * k, k),
votes_max, recall_tmp, cs_size_tmp);
for (int d = depth_min; d <= depth_max; ++d) {
recalls[d - depth_min] += recall_tmp[d - depth_min];
cs_sizes[d - depth_min] += cs_size_tmp[d - depth_min];
}
}
for (int d = depth_min; d <= depth_max; ++d) {
recalls[d - depth_min] /= (k * n_test);
cs_sizes[d - depth_min] /= n_test;
}
fit_times(Q);
std::set<Mrpt_Parameters,decltype(is_faster)*> pars = list_parameters(recalls);
opt_pars = pareto_frontier(pars);
index_type = autotuned_unpruned;
par.k = k_;
}
/** Build an autotuned index without prespecifying a recall level.
*
* @param Q Eigen ref to the test queries.
* @param k_ number of nearest neighbors searched for
* @param trees_max number of trees grown; default value -1 sets this to
* \f$ \mathrm{min}(\sqrt{n}, 1000)\f$, where \f$n\f$ is the number of data points.
   * @param depth_max maximum depth of trees considered when searching for optimal parameters; in the set
* \f$\{1,2, \dots ,\lfloor \log_2 (n) \rfloor \}\f$, where \f$n \f$
* is the number of data points; default value -1 sets this to
* \f$ \log_2(n) - 4 \f$, where \f$n\f$ is the number of data points
* @param depth_min_ minimum depth of trees considered when searching for
* optimal parameters on the set
* \f$\{1,2, \dots ,\lfloor \log_2 (n) \rfloor \}\f$; a default value -1
* sets this to \f$ \mathrm{max}(\lfloor \log_2 (n) \rfloor - 11, 5)\f$
* @param votes_max_ maximum number of votes considered when searching for
* optimal parameters; a default value -1 sets this to
* \f$ \mathrm{max}(\lfloor \mathrm{trees\_max} / 10 \rfloor,
* \mathrm{min}(10, \mathrm{trees\_max})) \f$
* @param density_ expected proportion of non-zero components of random vectors;
* default value -1.0 sets this to \f$ 1 / \sqrt{d} \f$, where \f$ d\f$ is
* the dimension of data
* @param seed seed given to a rng when generating random vectors;
* a default value 0 initializes the rng randomly with std::random_device
*/
void grow(const Eigen::Ref<const Eigen::MatrixXf> &Q, int k_, int trees_max = -1, int depth_max = -1,
int depth_min_ = -1, int votes_max_ = -1, float density_ = -1.0, int seed = 0) {
if (Q.rows() != dim) {
throw std::invalid_argument("Dimensions of the data and the validation set do not match.");
}
grow(Q.data(), Q.cols(), k_, trees_max,
depth_max, depth_min_, votes_max_, density_, seed);
}
/** Build an autotuned index sampling test queries from the training set
* and without prespecifying a recall level.
*
* @param k_ number of nearest neighbors searched for
* @param trees_max number of trees grown; default value -1 sets this to
* \f$ \mathrm{min}(\sqrt{n}, 1000)\f$, where \f$n\f$ is the number of data points.
   * @param depth_max maximum depth of trees considered when searching for optimal parameters; in the set
* \f$\{1,2, \dots ,\lfloor \log_2 (n) \rfloor \}\f$, where \f$n \f$
* is the number of data points; default value -1 sets this to
* \f$ \log_2(n) - 4 \f$, where \f$n\f$ is the number of data points
* @param depth_min_ minimum depth of trees considered when searching for
* optimal parameters on the set
* \f$\{1,2, \dots ,\lfloor \log_2 (n) \rfloor \}\f$; a default value -1
* sets this to \f$ \mathrm{max}(\lfloor \log_2 (n) \rfloor - 11, 5)\f$
* @param votes_max_ maximum number of votes considered when searching for
* optimal parameters; a default value -1 sets this to
* \f$ \mathrm{max}(\lfloor \mathrm{trees\_max} / 10 \rfloor,
* \mathrm{min}(10, \mathrm{trees\_max})) \f$
* @param density_ expected proportion of non-zero components of random vectors;
* default value -1.0 sets this to \f$ 1 / \sqrt{d} \f$, where \f$ d\f$ is
* the dimension of data
* @param seed seed given to a rng when generating random vectors;
* a default value 0 initializes the rng randomly with std::random_device
* @param n_test number of test queries sampled from the training set.
*/
void grow_autotune(int k_, int trees_max = -1, int depth_max = -1, int depth_min_ = -1,
int votes_max_ = -1, float density_ = -1.0, int seed = 0, int n_test = 100) {
if (n_test < 1) {
throw std::out_of_range("Test set size must be > 0.");
}
n_test = n_test > n_samples ? n_samples : n_test;
std::vector<int> indices_test(sample_indices(n_test, seed));
const Eigen::MatrixXf Q(subset(indices_test));
grow(Q.data(), Q.cols(), k_, trees_max,
depth_max, depth_min_, votes_max_, density_, seed, indices_test);
}
/** Create a new index by copying trees from an autotuned index grown
* without a prespecified recall level. The index is created so that
   * it gives the fastest query time at the recall level given as the parameter.
   * If this recall level cannot be met, an index with the highest achievable
   * recall level is created instead.
*
* @param target_recall target recall level; on the range [0,1]
* @return an autotuned Mrpt index with a recall level at least as high as
* target_recall
*/
Mrpt subset(double target_recall) const {
if (target_recall < 0.0 - epsilon || target_recall > 1.0 + epsilon) {
throw std::out_of_range("Target recall must be on the interval [0,1].");
}
Mrpt index2(X);
index2.par = parameters(target_recall);
int depth_max = depth;
index2.n_trees = index2.par.n_trees;
index2.depth = index2.par.depth;
index2.votes = index2.par.votes;
index2.n_pool = index2.depth * index2.n_trees;
index2.n_array = 1 << (index2.depth + 1);
index2.tree_leaves.assign(tree_leaves.begin(), tree_leaves.begin() + index2.n_trees);
index2.leaf_first_indices_all = leaf_first_indices_all;
index2.density = density;
index2.k = k;
index2.split_points = split_points.topLeftCorner(index2.n_array, index2.n_trees);
index2.leaf_first_indices = leaf_first_indices_all[index2.depth];
if (index2.density < 1) {
index2.sparse_random_matrix = Eigen::SparseMatrix<float, Eigen::RowMajor>(index2.n_pool, index2.dim);
for (int n_tree = 0; n_tree < index2.n_trees; ++n_tree)
index2.sparse_random_matrix.middleRows(n_tree * index2.depth, index2.depth) =
sparse_random_matrix.middleRows(n_tree * depth_max, index2.depth);
} else {
index2.dense_random_matrix = Eigen::Matrix<float, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor>(index2.n_pool, index2.dim);
for (int n_tree = 0; n_tree < index2.n_trees; ++n_tree)
index2.dense_random_matrix.middleRows(n_tree * index2.depth, index2.depth) =
dense_random_matrix.middleRows(n_tree * depth_max, index2.depth);
}
index2.index_type = autotuned;
return index2;
}
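  /* A sketch of the intended workflow: autotune once without a preset recall
   * level, then derive indices at several recall levels without regrowing
   * any trees:
   *
   *   Mrpt index(X);
   *   index.grow_autotune(10);             // k = 10, no target recall yet
   *   Mrpt fast = index.subset(0.8);
   *   Mrpt accurate = index.subset(0.95);
   */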
/** Create a new index by copying trees from an autotuned index grown
* without a prespecified recall level. The index is created so that
   * it gives the fastest query time at the recall level given as the parameter.
   * If this recall level cannot be met, an index with the highest achievable
   * recall level is created instead. This function differs from subset() only
* by the return value.
*
* @param target_recall target recall level; on the range [0,1]
* @return pointer to a dynamically allocated autotuned Mrpt index with
* a recall level at least as high as target_recall
*/
Mrpt *subset_pointer(double target_recall) const {
if (target_recall < 0.0 - epsilon || target_recall > 1.0 + epsilon) {
throw std::out_of_range("Target recall must be on the interval [0,1].");
}
Mrpt *index2 = new Mrpt(X);
index2->par = parameters(target_recall);
int depth_max = depth;
index2->n_trees = index2->par.n_trees;
index2->depth = index2->par.depth;
index2->votes = index2->par.votes;
index2->n_pool = index2->depth * index2->n_trees;
index2->n_array = 1 << (index2->depth + 1);
index2->tree_leaves.assign(tree_leaves.begin(), tree_leaves.begin() + index2->n_trees);
index2->leaf_first_indices_all = leaf_first_indices_all;
index2->density = density;
index2->k = k;
index2->split_points = split_points.topLeftCorner(index2->n_array, index2->n_trees);
index2->leaf_first_indices = leaf_first_indices_all[index2->depth];
if (index2->density < 1) {
index2->sparse_random_matrix = Eigen::SparseMatrix<float, Eigen::RowMajor>(index2->n_pool, index2->dim);
for (int n_tree = 0; n_tree < index2->n_trees; ++n_tree)
index2->sparse_random_matrix.middleRows(n_tree * index2->depth, index2->depth) =
sparse_random_matrix.middleRows(n_tree * depth_max, index2->depth);
} else {
index2->dense_random_matrix = Eigen::Matrix<float, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor>(index2->n_pool, index2->dim);
for (int n_tree = 0; n_tree < index2->n_trees; ++n_tree)
index2->dense_random_matrix.middleRows(n_tree * index2->depth, index2->depth) =
dense_random_matrix.middleRows(n_tree * depth_max, index2->depth);
}
index2->index_type = autotuned;
return index2;
}
/**
   * Return the Pareto frontier of optimal parameters for an index which
   * is autotuned without setting a recall level. Each parameter combination
   * in the returned vector is optimal in the sense that no other combination
   * reaches at least as high a recall level with a faster query time.
*
* @return vector of optimal parameters
*/
std::vector<Mrpt_Parameters> optimal_parameters() const {
if (index_type == normal) {
throw std::logic_error("The list of optimal parameters cannot be retrieved for the non-autotuned index.");
}
if (index_type == autotuned) {
throw std::logic_error("The list of optimal parameters cannot be retrieved for the index which has already been subsetted or deleted to the target recall level.");
}
std::vector<Mrpt_Parameters> new_pars;
std::copy(opt_pars.begin(), opt_pars.end(), std::back_inserter(new_pars));
return new_pars;
}
/**@}*/
/** @name Approximate k-nn search
* A query using a non-autotuned index. Finds k approximate nearest neighbors
* from a data set X for a query point q. Because the index is not autotuned,
* k and vote threshold are set manually. The indices of k nearest neighbors
* are written to a buffer out, which has to be preallocated to have at least
* length k. Optionally also Euclidean distances to these k nearest points
* are written to a buffer out_distances. If there are less than k points in
* the candidate set, -1 is written to the remaining locations of the
* output buffers.
*/
/**
* Approximate k-nn search using a normal index.
*
* @param data pointer to an array containing the query point
* @param k number of nearest neighbors searched for
   * @param vote_threshold number of votes required for a query point to be included in the candidate set
* @param out output buffer (size = k) for the indices of k approximate nearest neighbors
* @param out_distances optional output buffer (size = k) for distances to k approximate nearest neighbors
* @param out_n_elected optional output parameter (size = 1) for the candidate set size
*/
void query(const float *data, int k, int vote_threshold, int *out,
float *out_distances = nullptr, int *out_n_elected = nullptr) const {
if (k <= 0 || k > n_samples) {
throw std::out_of_range("k must belong to the set {1, ..., n}.");
}
if (vote_threshold <= 0 || vote_threshold > n_trees) {
throw std::out_of_range("vote_threshold must belong to the set {1, ... , n_trees}.");
}
if (empty()) {
throw std::logic_error("The index must be built before making queries.");
}
const Eigen::Map<const Eigen::VectorXf> q(data, dim);
Eigen::VectorXf projected_query(n_pool);
if (density < 1)
projected_query.noalias() = sparse_random_matrix * q;
else
projected_query.noalias() = dense_random_matrix * q;
std::vector<int> found_leaves(n_trees);
/*
* The following loops over all trees, and routes the query to exactly one
* leaf in each.
*/
#pragma omp parallel for
for (int n_tree = 0; n_tree < n_trees; ++n_tree) {
int idx_tree = 0;
for (int d = 0; d < depth; ++d) {
const int j = n_tree * depth + d;
const int idx_left = 2 * idx_tree + 1;
const int idx_right = idx_left + 1;
const float split_point = split_points(idx_tree, n_tree);
if (projected_query(j) <= split_point) {
idx_tree = idx_left;
} else {
idx_tree = idx_right;
}
}
found_leaves[n_tree] = idx_tree - (1 << depth) + 1;
}
int n_elected = 0, max_leaf_size = n_samples / (1 << depth) + 1;
Eigen::VectorXi elected(n_trees * max_leaf_size);
Eigen::VectorXi votes = Eigen::VectorXi::Zero(n_samples);
// count votes
for (int n_tree = 0; n_tree < n_trees; ++n_tree) {
int leaf_begin = leaf_first_indices[found_leaves[n_tree]];
int leaf_end = leaf_first_indices[found_leaves[n_tree] + 1];
const std::vector<int> &indices = tree_leaves[n_tree];
for (int i = leaf_begin; i < leaf_end; ++i) {
int idx = indices[i];
if (++votes(idx) == vote_threshold)
elected(n_elected++) = idx;
}
}
if (out_n_elected) {
*out_n_elected = n_elected;
}
exact_knn(q, k, elected, n_elected, out, out_distances);
}
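  /* A query sketch against a normal index (q_data is assumed to point to a
   * query vector of the same dimension as the data); `out` must hold at
   * least k entries, and unfilled positions are set to -1:
   *
   *   std::vector<int> out(10);
   *   std::vector<float> dist(10);
   *   index.query(q_data, 10, 2, out.data(), dist.data()); // vote threshold 2
   */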
/**
* Approximate k-nn search using a normal index.
*
* @param q Eigen ref to the query point
* @param k number of nearest neighbors searched for
* @param vote_threshold number of votes required for a query point to be included in the candidate set
* @param out output buffer (size = k) for the indices of k approximate nearest neighbors
* @param out_distances optional output buffer (size = k) for distances to k approximate nearest neighbors
* @param out_n_elected optional output parameter (size = 1) for the candidate set size
*/
void query(const Eigen::Ref<const Eigen::VectorXf> &q, int k, int vote_threshold, int *out,
float *out_distances = nullptr, int *out_n_elected = nullptr) const {
query(q.data(), k, vote_threshold, out, out_distances, out_n_elected);
}
/**@}*/
/** @name Approximate k-nn search using autotuned index
* Approximate k-nn search using an autotuned index. Finds k approximate
* nearest neighbors from a data set X for a query point q. Because the index
* is autotuned, no parameters other than a query point and an output are
* required: k is preset, and the optimal vote count is used automatically.
* The indices of k nearest neighbors are written to a buffer out, which has
* to be preallocated to have at least length k. Optionally also the Euclidean
* distances to these k nearest points are written to a buffer
* out_distances. If there are less than k points in the candidate set,
* -1 is written to the remaining locations of the output buffers.
*/
/**
* Approximate k-nn search using an autotuned index.
*
* @param q pointer to an array containing the query point
* @param out output buffer (size = k) for the indices of k approximate nearest neighbors
* @param out_distances optional output buffer (size = k) for distances to k approximate nearest neighbors
* @param out_n_elected optional output parameter (size = 1) for the candidate set size
*/
void query(const float *q, int *out, float *out_distances = nullptr,
int *out_n_elected = nullptr) const {
if (index_type == normal) {
throw std::logic_error("The index is not autotuned: k and vote threshold has to be specified.");
}
if (index_type == autotuned_unpruned) {
throw std::logic_error("The target recall level has to be set before making queries.");
}
query(q, k, votes, out, out_distances, out_n_elected);
}
/**
* Approximate k-nn search using an autotuned index.
*
* @param q Eigen ref to the query point
* @param out output buffer (size = k) for the indices of k approximate nearest neighbors
* @param out_distances optional output buffer (size = k) for distances to k approximate nearest neighbors
* @param out_n_elected optional output parameter (size = 1) for the candidate set size
*/
void query(const Eigen::Ref<const Eigen::VectorXf> &q, int *out, float *out_distances = nullptr,
int *out_n_elected = nullptr) const {
query(q.data(), out, out_distances, out_n_elected);
}
/**@}*/
/** @name Exact k-nn search
* Functions for fast exact k-nn search: find k nearest neighbors for a
* query point q from a data set X_. The indices of k nearest neighbors are
* written to a buffer out, which has to be preallocated to have at least
* length k. Optionally also the Euclidean distances to these k nearest points
* are written to a buffer out_distances. There are both static and member
* versions.
*/
/**
* @param q_data pointer to an array containing the query point
* @param X_data pointer to an array containing the data set
* @param dim dimension of data
* @param n_samples number of points in a data set
* @param k number of neighbors searched for
* @param out output buffer (size = k) for the indices of k nearest neighbors
* @param out_distances optional output buffer (size = k) for the distances to k nearest neighbors
*/
static void exact_knn(const float *q_data, const float *X_data, int dim, int n_samples,
int k, int *out, float *out_distances = nullptr) {
const Eigen::Map<const Eigen::MatrixXf> X(X_data, dim, n_samples);
const Eigen::Map<const Eigen::VectorXf> q(q_data, dim);
if (k < 1 || k > n_samples) {
throw std::out_of_range("k must be positive and no greater than the sample size of data X.");
}
Eigen::VectorXf distances(n_samples);
#pragma omp parallel for
for (int i = 0; i < n_samples; ++i)
distances(i) = (X.col(i) - q).squaredNorm();
if (k == 1) {
Eigen::MatrixXf::Index index;
distances.minCoeff(&index);
out[0] = index;
if (out_distances)
out_distances[0] = std::sqrt(distances(index));
return;
}
Eigen::VectorXi idx(n_samples);
std::iota(idx.data(), idx.data() + n_samples, 0);
std::partial_sort(idx.data(), idx.data() + k, idx.data() + n_samples,
[&distances](int i1, int i2) { return distances(i1) < distances(i2); });
for (int i = 0; i < k; ++i)
out[i] = idx(i);
if (out_distances) {
for (int i = 0; i < k; ++i)
out_distances[i] = std::sqrt(distances(idx(i)));
}
}
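  /* A sketch of brute-force search with the static version, no index needed
   * (q_data and X_data are assumed raw column-major float arrays):
   *
   *   std::vector<int> nn(5);
   *   Mrpt::exact_knn(q_data, X_data, dim, n_samples, 5, nn.data());
   */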
/**
* @param q Eigen ref to a query point
* @param X Eigen ref to a data set
* @param k number of neighbors searched for
* @param out output buffer (size = k) for the indices of k nearest neighbors
* @param out_distances optional output buffer (size = k) for the distances to k nearest neighbors
*/
static void exact_knn(const Eigen::Ref<const Eigen::VectorXf> &q,
const Eigen::Ref<const Eigen::MatrixXf> &X,
int k, int *out, float *out_distances = nullptr) {
Mrpt::exact_knn(q.data(), X.data(), X.rows(), X.cols(), k, out, out_distances);
}
/**
* @param q pointer to an array containing the query point
* @param k number of neighbors searched for
* @param out output buffer (size = k) for the indices of k nearest neighbors
* @param out_distances optional output buffer (size = k) for the distances to k nearest neighbors
*/
void exact_knn(const float *q, int k, int *out, float *out_distances = nullptr) const {
Mrpt::exact_knn(q, X.data(), dim, n_samples, k, out, out_distances);
}
/**
* @param q pointer to an array containing the query point
* @param k number of points searched for
* @param out output buffer (size = k) for the indices of k nearest neighbors
* @param out_distances optional output buffer (size = k) for the distances to k nearest neighbors
*/
void exact_knn(const Eigen::Ref<const Eigen::VectorXf> &q, int k, int *out,
float *out_distances = nullptr) const {
Mrpt::exact_knn(q.data(), X.data(), dim, n_samples, k, out, out_distances);
}
/**@}*/
/** @name Utility functions
* Saving and loading an index and checking if it is already constructed.
* Saving and loading work for both autotuned and non-autotuned indices, and
* load() retrieves also the optimal parameters found by autotuning.
* The same data set used to build a saved index has to be used to
* construct the index into which it is loaded.
*/
/**
* Saves the index to a file.
*
   * @param path filepath to the output file.
* @return true if saving succeeded, false otherwise.
*/
bool save(const char *path) const {
FILE *fd;
if ((fd = fopen(path, "wb")) == NULL)
return false;
int i = index_type;
fwrite(&i, sizeof(int), 1, fd);
if (index_type == 2) {
write_parameter_list(opt_pars, fd);
}
write_parameters(&par, fd);
fwrite(&n_trees, sizeof(int), 1, fd);
fwrite(&depth, sizeof(int), 1, fd);
fwrite(&density, sizeof(float), 1, fd);
fwrite(split_points.data(), sizeof(float), n_array * n_trees, fd);
// save tree leaves
for (int i = 0; i < n_trees; ++i) {
int sz = tree_leaves[i].size();
fwrite(&sz, sizeof(int), 1, fd);
fwrite(&tree_leaves[i][0], sizeof(int), sz, fd);
}
// save random matrix
if (density < 1) {
int non_zeros = sparse_random_matrix.nonZeros();
fwrite(&non_zeros, sizeof(int), 1, fd);
for (int k = 0; k < sparse_random_matrix.outerSize(); ++k) {
for (Eigen::SparseMatrix<float, Eigen::RowMajor>::InnerIterator it(sparse_random_matrix, k); it; ++it) {
float val = it.value();
int row = it.row(), col = it.col();
fwrite(&row, sizeof(int), 1, fd);
fwrite(&col, sizeof(int), 1, fd);
fwrite(&val, sizeof(float), 1, fd);
}
}
} else {
fwrite(dense_random_matrix.data(), sizeof(float), n_pool * dim, fd);
}
fclose(fd);
return true;
}
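  /* A save/load sketch (the file name is illustrative); the loading index
   * must be constructed over the same data set:
   *
   *   index.save("index.bin");
   *   Mrpt index2(X);
   *   index2.load("index.bin");
   */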
/**
* Loads an index from a file.
*
* @param path filepath to the index file.
* @return true if loading succeeded, false otherwise.
*/
bool load(const char *path) {
FILE *fd;
if ((fd = fopen(path, "rb")) == NULL)
return false;
int i;
fread(&i, sizeof(int), 1, fd);
index_type = static_cast<itype>(i);
if (index_type == autotuned_unpruned) {
read_parameter_list(fd);
}
read_parameters(&par, fd);
fread(&n_trees, sizeof(int), 1, fd);
fread(&depth, sizeof(int), 1, fd);
fread(&density, sizeof(float), 1, fd);
n_pool = n_trees * depth;
n_array = 1 << (depth + 1);
count_first_leaf_indices_all(leaf_first_indices_all, n_samples, depth);
leaf_first_indices = leaf_first_indices_all[depth];
split_points = Eigen::MatrixXf(n_array, n_trees);
fread(split_points.data(), sizeof(float), n_array * n_trees, fd);
// load tree leaves
tree_leaves = std::vector<std::vector<int>>(n_trees);
for (int i = 0; i < n_trees; ++i) {
int sz;
fread(&sz, sizeof(int), 1, fd);
std::vector<int> leaves(sz);
fread(&leaves[0], sizeof(int), sz, fd);
tree_leaves[i] = leaves;
}
// load random matrix
if (density < 1) {
int non_zeros;
fread(&non_zeros, sizeof(int), 1, fd);
sparse_random_matrix = Eigen::SparseMatrix<float>(n_pool, dim);
std::vector<Eigen::Triplet<float>> triplets;
for (int k = 0; k < non_zeros; ++k) {
int row, col;
float val;
fread(&row, sizeof(int), 1, fd);
fread(&col, sizeof(int), 1, fd);
fread(&val, sizeof(float), 1, fd);
triplets.push_back(Eigen::Triplet<float>(row, col, val));
}
sparse_random_matrix.setFromTriplets(triplets.begin(), triplets.end());
sparse_random_matrix.makeCompressed();
} else {
dense_random_matrix = Eigen::Matrix<float, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor>(n_pool, dim);
fread(dense_random_matrix.data(), sizeof(float), n_pool * dim, fd);
}
fclose(fd);
k = par.k;
votes = par.votes;
return true;
}
/**
   * Has the index been constructed yet?
   *
   * @return true if the index is empty (not yet grown), false otherwise.
*/
bool empty() const {
return n_trees == 0;
}
/**@}*/
/** @name
* Friend declarations for test fixtures. Tests are located at
* https://github.com/vioshyvo/RP-test.
*/
friend class MrptTest;
friend class UtilityTest;
/**@}*/
private:
/**
* Builds a single random projection tree. The tree is constructed by recursively
* projecting the data on a random vector and splitting into two by the median.
*/
void grow_subtree(std::vector<int>::iterator begin, std::vector<int>::iterator end,
int tree_level, int i, int n_tree, const Eigen::MatrixXf &tree_projections) {
int n = end - begin;
int idx_left = 2 * i + 1;
int idx_right = idx_left + 1;
if (tree_level == depth) return;
std::nth_element(begin, begin + n / 2, end,
[&tree_projections, tree_level] (int i1, int i2) {
return tree_projections(tree_level, i1) < tree_projections(tree_level, i2);
});
auto mid = end - n / 2;
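    // For odd n, *(mid - 1) is the true median projection (nth_element placed
    // it there); for even n the split point is the midpoint between the two
    // middle projection values.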
if (n % 2) {
split_points(i, n_tree) = tree_projections(tree_level, *(mid - 1));
} else {
auto left_it = std::max_element(begin, mid,
[&tree_projections, tree_level] (int i1, int i2) {
return tree_projections(tree_level, i1) < tree_projections(tree_level, i2);
});
split_points(i, n_tree) = (tree_projections(tree_level, *mid) +
tree_projections(tree_level, *left_it)) / 2.0;
}
grow_subtree(begin, mid, tree_level + 1, idx_left, n_tree, tree_projections);
grow_subtree(mid, end, tree_level + 1, idx_right, n_tree, tree_projections);
}
/**
* Find k nearest neighbors from data for the query point
*/
void exact_knn(const Eigen::Map<const Eigen::VectorXf> &q, int k, const Eigen::VectorXi &indices,
int n_elected, int *out, float *out_distances = nullptr) const {
if (!n_elected) {
for (int i = 0; i < k; ++i)
out[i] = -1;
if (out_distances) {
for (int i = 0; i < k; ++i)
out_distances[i] = -1;
}
return;
}
Eigen::VectorXf distances(n_elected);
#pragma omp parallel for
for (int i = 0; i < n_elected; ++i)
distances(i) = (X.col(indices(i)) - q).squaredNorm();
if (k == 1) {
Eigen::MatrixXf::Index index;
distances.minCoeff(&index);
out[0] = n_elected ? indices(index) : -1;
if (out_distances)
out_distances[0] = n_elected ? std::sqrt(distances(index)) : -1;
return;
}
int n_to_sort = n_elected > k ? k : n_elected;
Eigen::VectorXi idx(n_elected);
std::iota(idx.data(), idx.data() + n_elected, 0);
std::partial_sort(idx.data(), idx.data() + n_to_sort, idx.data() + n_elected,
[&distances](int i1, int i2) { return distances(i1) < distances(i2); });
for (int i = 0; i < k; ++i)
out[i] = i < n_elected ? indices(idx(i)) : -1;
if (out_distances) {
for (int i = 0; i < k; ++i)
out_distances[i] = i < n_elected ? std::sqrt(distances(idx(i))) : -1;
}
}
void prune(double target_recall) {
if (target_recall < 0.0 - epsilon || target_recall > 1.0 + epsilon) {
throw std::out_of_range("Target recall must be on the interval [0,1].");
}
par = parameters(target_recall);
if (!par.n_trees) {
return;
}
int depth_max = depth;
n_trees = par.n_trees;
depth = par.depth;
votes = par.votes;
n_pool = depth * n_trees;
n_array = 1 << (depth + 1);
tree_leaves.resize(n_trees);
tree_leaves.shrink_to_fit();
split_points.conservativeResize(n_array, n_trees);
leaf_first_indices = leaf_first_indices_all[depth];
if (density < 1) {
Eigen::SparseMatrix<float, Eigen::RowMajor> srm_new(n_pool, dim);
for (int n_tree = 0; n_tree < n_trees; ++n_tree)
srm_new.middleRows(n_tree * depth, depth) = sparse_random_matrix.middleRows(n_tree * depth_max, depth);
sparse_random_matrix = srm_new;
} else {
Eigen::Matrix<float, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor> drm_new(n_pool, dim);
for (int n_tree = 0; n_tree < n_trees; ++n_tree)
drm_new.middleRows(n_tree * depth, depth) = dense_random_matrix.middleRows(n_tree * depth_max, depth);
dense_random_matrix = drm_new;
}
index_type = autotuned;
}
void count_elected(const Eigen::VectorXf &q, const Eigen::Map<Eigen::VectorXi> &exact, int votes_max,
std::vector<Eigen::MatrixXd> &recalls, std::vector<Eigen::MatrixXd> &cs_sizes) const {
Eigen::VectorXf projected_query(n_pool);
if (density < 1)
projected_query.noalias() = sparse_random_matrix * q;
else
projected_query.noalias() = dense_random_matrix * q;
int depth_min = depth - recalls.size() + 1;
std::vector<std::vector<int>> start_indices(n_trees);
#pragma omp parallel for
for (int n_tree = 0; n_tree < n_trees; ++n_tree) {
start_indices[n_tree] = std::vector<int>(depth - depth_min + 1);
int idx_tree = 0;
for (int d = 0; d < depth; ++d) {
const int j = n_tree * depth + d;
const int idx_left = 2 * idx_tree + 1;
const int idx_right = idx_left + 1;
const float split_point = split_points(idx_tree, n_tree);
if (projected_query(j) <= split_point) {
idx_tree = idx_left;
} else {
idx_tree = idx_right;
}
if (d >= depth_min - 1)
start_indices[n_tree][d - depth_min + 1] = idx_tree - (1 << (d + 1)) + 1;
}
}
const int *exact_begin = exact.data();
const int *exact_end = exact.data() + exact.size();
for (int depth_crnt = depth_min; depth_crnt <= depth; ++depth_crnt) {
Eigen::VectorXi votes = Eigen::VectorXi::Zero(n_samples);
const std::vector<int> &leaf_first_indices = leaf_first_indices_all[depth_crnt];
Eigen::MatrixXd recall(votes_max, n_trees);
Eigen::MatrixXd candidate_set_size(votes_max, n_trees);
recall.col(0) = Eigen::VectorXd::Zero(votes_max);
candidate_set_size.col(0) = Eigen::VectorXd::Zero(votes_max);
// count votes
for (int n_tree = 0; n_tree < n_trees; ++n_tree) {
std::vector<int> &found_leaves = start_indices[n_tree];
if (n_tree) {
recall.col(n_tree) = recall.col(n_tree - 1);
candidate_set_size.col(n_tree) = candidate_set_size.col(n_tree - 1);
}
int leaf_begin = leaf_first_indices[found_leaves[depth_crnt - depth_min]];
int leaf_end = leaf_first_indices[found_leaves[depth_crnt - depth_min] + 1];
const std::vector<int> &indices = tree_leaves[n_tree];
for (int i = leaf_begin; i < leaf_end; ++i) {
int idx = indices[i];
int v = ++votes(idx);
if (v <= votes_max) {
candidate_set_size(v - 1, n_tree)++;
if (std::find(exact_begin, exact_end, idx) != exact_end)
recall(v - 1, n_tree)++;
}
}
}
recalls[depth_crnt - depth_min] = recall;
cs_sizes[depth_crnt - depth_min] = candidate_set_size;
}
}
/**
* Builds a random sparse matrix for use in random projection. The components of
* the matrix are drawn from the distribution
*
* 0 w.p. 1 - a
* N(0, 1) w.p. a
*
* where a = density.
*/
static void build_sparse_random_matrix(Eigen::SparseMatrix<float, Eigen::RowMajor> &sparse_random_matrix,
int n_row, int n_col, float density, int seed = 0) {
sparse_random_matrix = Eigen::SparseMatrix<float, Eigen::RowMajor>(n_row, n_col);
std::random_device rd;
int s = seed ? seed : rd();
std::mt19937 gen(s);
std::uniform_real_distribution<float> uni_dist(0, 1);
std::normal_distribution<float> norm_dist(0, 1);
std::vector<Eigen::Triplet<float>> triplets;
for (int j = 0; j < n_row; ++j) {
for (int i = 0; i < n_col; ++i) {
if (uni_dist(gen) > density) continue;
triplets.push_back(Eigen::Triplet<float>(j, i, norm_dist(gen)));
}
}
sparse_random_matrix.setFromTriplets(triplets.begin(), triplets.end());
sparse_random_matrix.makeCompressed();
}
/*
* Builds a random dense matrix for use in random projection. The components of
* the matrix are drawn from the standard normal distribution.
*/
static void build_dense_random_matrix(Eigen::Matrix<float, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor> &dense_random_matrix,
int n_row, int n_col, int seed = 0) {
dense_random_matrix = Eigen::Matrix<float, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor>(n_row, n_col);
std::random_device rd;
int s = seed ? seed : rd();
std::mt19937 gen(s);
std::normal_distribution<float> normal_dist(0, 1);
std::generate(dense_random_matrix.data(), dense_random_matrix.data() + n_row * n_col,
[&normal_dist, &gen] { return normal_dist(gen); });
}
void compute_exact(const Eigen::Map<const Eigen::MatrixXf> &Q, Eigen::MatrixXi &out_exact,
const std::vector<int> &indices_test = {}) const {
int n_test = Q.cols();
Eigen::VectorXi idx(n_samples);
std::iota(idx.data(), idx.data() + n_samples, 0);
for (int i = 0; i < n_test; ++i) {
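// Leave-one-out (editor's note): when the query points are drawn from the data itself,
// std::remove below shifts the query's own index out of the first n_samples - 1 slots so
// it cannot be returned as its own nearest neighbor; the index is written back into the
// last slot after the search, keeping idx a full permutation for the next query.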
if(!indices_test.empty()) {
std::remove(idx.data(), idx.data() + n_samples, indices_test[i]);
}
exact_knn(Eigen::Map<const Eigen::VectorXf>(Q.data() + i * dim, dim), k, idx,
(indices_test.empty() ? n_samples : n_samples - 1), out_exact.data() + i * k);
std::sort(out_exact.data() + i * k, out_exact.data() + i * k + k);
if(!indices_test.empty()) {
idx[n_samples - 1] = indices_test[i];
}
}
}
static bool is_faster(const Mrpt_Parameters &par1, const Mrpt_Parameters &par2) {
return par1.estimated_qtime < par2.estimated_qtime;
}
void vote(const Eigen::VectorXf &projected_query, int vote_threshold, Eigen::VectorXi &elected,
int &n_elected, int n_trees, int depth_crnt) {
std::vector<int> found_leaves(n_trees);
const std::vector<int> &leaf_first_indices = leaf_first_indices_all[depth_crnt];
#pragma omp parallel for
for (int n_tree = 0; n_tree < n_trees; ++n_tree) {
int idx_tree = 0;
for (int d = 0; d < depth_crnt; ++d) {
const int j = n_tree * depth + d;
const int idx_left = 2 * idx_tree + 1;
const int idx_right = idx_left + 1;
const float split_point = split_points(idx_tree, n_tree);
if (projected_query(j) <= split_point) {
idx_tree = idx_left;
} else {
idx_tree = idx_right;
}
}
found_leaves[n_tree] = idx_tree - (1 << depth_crnt) + 1;
}
int max_leaf_size = n_samples / (1 << depth_crnt) + 1;
elected = Eigen::VectorXi(n_trees * max_leaf_size);
Eigen::VectorXi votes = Eigen::VectorXi::Zero(n_samples);
// count votes
for (int n_tree = 0; n_tree < n_trees; ++n_tree) {
int leaf_begin = leaf_first_indices[found_leaves[n_tree]];
int leaf_end = leaf_first_indices[found_leaves[n_tree] + 1];
const std::vector<int> &indices = tree_leaves[n_tree];
for (int i = leaf_begin; i < leaf_end; ++i) {
int idx = indices[i];
if (++votes(idx) == vote_threshold)
elected(n_elected++) = idx;
}
}
}
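/* Note on the index arithmetic above (editor's note): each tree is stored as an implicit
complete binary tree with children at 2*i + 1 and 2*i + 2, so after descending depth_crnt
levels idx_tree lies in [2^depth_crnt - 1, 2^(depth_crnt+1) - 2]. Subtracting
(1 << depth_crnt) - 1 maps the node reached to a leaf ordinal in [0, 2^depth_crnt - 1],
which is what indexes leaf_first_indices. E.g. with depth_crnt = 2 the leaves are nodes
3..6 and map to ordinals 0..3. */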
std::pair<double,double> fit_projection_times(const Eigen::Map<const Eigen::MatrixXf> &Q,
std::vector<int> &exact_x) {
std::vector<double> projection_times, projection_x;
long double idx_sum = 0;
std::vector<int> tested_trees {1,2,3,4,5,7,10,15,20,25,30,40,50};
generate_x(tested_trees, n_trees, 10, n_trees);
for (int d = depth_min; d <= depth; ++d) {
for (int i = 0; i < (int) tested_trees.size(); ++i) {
int t = tested_trees[i];
int n_random_vectors = t * d;
projection_x.push_back(n_random_vectors);
Eigen::SparseMatrix<float, Eigen::RowMajor> sparse_mat;
Eigen::Matrix<float, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor> dense_mat;
if (density < 1) {
build_sparse_random_matrix(sparse_mat, n_random_vectors, dim, density);
} else {
build_dense_random_matrix(dense_mat, n_random_vectors, dim);
}
double start_proj = omp_get_wtime();
Eigen::VectorXf projected_query(n_random_vectors);
if (density < 1) {
projected_query.noalias() = sparse_mat * Q.col(0);
} else {
projected_query.noalias() = dense_mat * Q.col(0);
}
double end_proj = omp_get_wtime();
projection_times.push_back(end_proj - start_proj);
idx_sum += projected_query.norm();
int votes_index = votes_max < t ? votes_max : t;
for (int v = 1; v <= votes_index; ++v) {
int cs_size = get_candidate_set_size(t, d, v);
if (cs_size > 0) exact_x.push_back(cs_size);
}
}
}
// use results to ensure that the compiler does not optimize away the timed code.
projection_x[0] += idx_sum > 1.0 ? 0.0000 : 0.0001;
return fit_theil_sen(projection_x, projection_times);
}
std::vector<std::map<int,std::pair<double,double>>> fit_voting_times(const Eigen::Map<const Eigen::MatrixXf> &Q) {
int n_test = Q.cols();
std::random_device rd;
std::mt19937 rng(rd());
std::uniform_int_distribution<int> uni(0, n_test - 1);
std::vector<int> tested_trees {1,2,3,4,5,7,10,15,20,25,30,40,50};
generate_x(tested_trees, n_trees, 10, n_trees);
std::vector<int> vote_thresholds_x {1,2,3,4,5,6,7,8,9,10,11,12,13,14,15};
generate_x(vote_thresholds_x, votes_max, 10, votes_max);
beta_voting = std::vector<std::map<int,std::pair<double,double>>>();
for (int d = depth_min; d <= depth; ++d) {
std::map<int,std::pair<double,double>> beta;
for (const auto &v : vote_thresholds_x) {
long double idx_sum = 0;
std::vector<double> voting_times, voting_x;
for (int i = 0; i < (int) tested_trees.size(); ++i) {
int t = tested_trees[i];
int n_el = 0;
Eigen::VectorXi elected;
auto ri = uni(rng);
Eigen::VectorXf projected_query(n_trees * depth);
if (density < 1) {
projected_query.noalias() = sparse_random_matrix * Q.col(ri);
} else {
projected_query.noalias() = dense_random_matrix * Q.col(ri);
}
double start_voting = omp_get_wtime();
vote(projected_query, v, elected, n_el, t, d);
double end_voting = omp_get_wtime();
voting_times.push_back(end_voting - start_voting);
voting_x.push_back(t);
for (int i = 0; i < n_el; ++i)
idx_sum += elected(i);
}
voting_x[0] += idx_sum > 1.0 ? 0.0 : 0.00001;
beta[v] = fit_theil_sen(voting_x, voting_times);
}
beta_voting.push_back(beta);
}
return beta_voting;
}
static void generate_x(std::vector<int> &x, int max_generated, int n_tested, int max_val) {
n_tested = max_generated > n_tested ? n_tested : max_generated;
int increment = max_generated / n_tested;
for (int i = 1; i <= n_tested; ++i) {
if (std::find(x.begin(), x.end(), i * increment) == x.end() && i * increment <= max_generated) {
x.push_back(i * increment);
}
}
auto end = std::remove_if(x.begin(), x.end(), [max_val](int t) { return t > max_val; });
x.erase(end, x.end());
}
std::pair<double,double> fit_exact_times(const Eigen::Map<const Eigen::MatrixXf> &Q) {
std::vector<int> s_tested {1,2,5,10,20,35,50,75,100,150,200,300,400,500};
generate_x(s_tested, n_samples / 20, 20, n_samples);
int n_test = Q.cols();
std::vector<double> exact_times;
long double idx_sum = 0;
std::random_device rd;
std::mt19937 rng(rd());
std::uniform_int_distribution<int> uni(0, n_test - 1);
std::uniform_int_distribution<int> uni2(0, n_samples - 1);
std::vector<double> ex;
int n_sim = 20;
for (int i = 0; i < (int) s_tested.size(); ++i) {
double mean_exact_time = 0;
int s_size = s_tested[i];
ex.push_back(s_size);
for (int m = 0; m < n_sim; ++m) {
auto ri = uni(rng);
Eigen::VectorXi elected(s_size);
for (int j = 0; j < elected.size(); ++j)
elected(j) = uni2(rng);
double start_exact = omp_get_wtime();
std::vector<int> res(k);
exact_knn(Eigen::Map<const Eigen::VectorXf>(Q.data() + ri * dim, dim), k, elected, s_size, &res[0]);
double end_exact = omp_get_wtime();
mean_exact_time += (end_exact - start_exact);
for (int l = 0; l < k; ++l)
idx_sum += res[l];
}
mean_exact_time /= n_sim;
exact_times.push_back(mean_exact_time);
}
ex[0] += idx_sum > 1.0 ? 0.0 : 0.00001;
return fit_theil_sen(ex, exact_times);
}
std::set<Mrpt_Parameters,decltype(is_faster)*> list_parameters(const std::vector<Eigen::MatrixXd> &recalls) {
std::set<Mrpt_Parameters,decltype(is_faster)*> pars(is_faster);
std::vector<Eigen::MatrixXd> query_times(depth - depth_min + 1);
for (int d = depth_min; d <= depth; ++d) {
Eigen::MatrixXd query_time = Eigen::MatrixXd::Zero(votes_max, n_trees);
for (int t = 1; t <= n_trees; ++t) {
int votes_index = votes_max < t ? votes_max : t;
for (int v = 1; v <= votes_index; ++v) {
double qt = get_query_time(t, d, v);
query_time(v - 1, t - 1) = qt;
Mrpt_Parameters p;
p.n_trees = t;
p.depth = d;
p.votes = v;
p.k = k;
p.estimated_qtime = qt;
p.estimated_recall = recalls[d - depth_min](v - 1, t - 1);
pars.insert(p);
}
}
query_times[d - depth_min] = query_time;
}
return pars;
}
std::set<Mrpt_Parameters,decltype(is_faster)*> pareto_frontier(const std::set<Mrpt_Parameters,decltype(is_faster)*> &pars) {
opt_pars = std::set<Mrpt_Parameters,decltype(is_faster)*>(is_faster);
double best_recall = -1.0;
for (const auto &p : pars) { // compute pareto frontier for query times and recalls
if (p.estimated_recall > best_recall) {
opt_pars.insert(p);
best_recall = p.estimated_recall;
}
}
return opt_pars;
}
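/* Illustration (editor's note): pars is ordered by increasing estimated query time, so a
single pass keeping only strict recall improvements yields the Pareto frontier. E.g.
candidates (qtime, recall) = (1, 0.60), (2, 0.55), (3, 0.80) keep (1, 0.60) and
(3, 0.80); (2, 0.55) is dominated, being both slower and less accurate than the first. */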
void fit_times(const Eigen::Map<const Eigen::MatrixXf> &Q) {
std::vector<int> exact_x;
beta_projection = fit_projection_times(Q, exact_x);
beta_voting = fit_voting_times(Q);
beta_exact = fit_exact_times(Q);
}
static std::pair<double,double> fit_theil_sen(const std::vector<double> &x,
const std::vector<double> &y) {
int n = x.size();
std::vector<double> slopes;
for (int i = 0; i < n; ++i) {
for (int j = 0; j < n; ++j) {
if (i != j)
slopes.push_back((y[j] - y[i]) / (x[j] - x[i]));
}
}
int n_slopes = slopes.size();
std::nth_element(slopes.begin(), slopes.begin() + n_slopes / 2, slopes.end());
double slope = *(slopes.begin() + n_slopes / 2);
std::vector<double> residuals(n);
for (int i = 0; i < n; ++i)
residuals[i] = y[i] - slope * x[i];
std::nth_element(residuals.begin(), residuals.begin() + n / 2, residuals.end());
double intercept = *(residuals.begin() + n / 2);
return std::make_pair(intercept, slope);
}
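/* Worked example (editor's note): for the points (1,2), (2,3), (3,5) the pairwise slopes
are {1, 1.5, 2} (each computed twice above, which leaves the median unchanged); the
median slope is 1.5 and the residuals y - 1.5*x are {0.5, 0, 0.5} with median 0.5, so
the fit is y = 0.5 + 1.5*x. Note that std::nth_element at index n/2 picks the upper
median for even counts, which is adequate for this estimator. */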
void write_parameters(const Mrpt_Parameters *p, FILE *fd) const {
if (!fd) {
return;
}
fwrite(&p->n_trees, sizeof(int), 1, fd);
fwrite(&p->depth, sizeof(int), 1, fd);
fwrite(&p->votes, sizeof(int), 1, fd);
fwrite(&p->k, sizeof(int), 1, fd);
fwrite(&p->estimated_qtime, sizeof(double), 1, fd);
fwrite(&p->estimated_recall, sizeof(double), 1, fd);
}
void read_parameters(Mrpt_Parameters *p, FILE *fd) {
fread(&p->n_trees, sizeof(int), 1, fd);
fread(&p->depth, sizeof(int), 1, fd);
fread(&p->votes, sizeof(int), 1, fd);
fread(&p->k, sizeof(int), 1, fd);
fread(&p->estimated_qtime, sizeof(double), 1, fd);
fread(&p->estimated_recall, sizeof(double), 1, fd);
}
void write_parameter_list(const std::set<Mrpt_Parameters,decltype(is_faster)*> &pars, FILE *fd) const {
if (!fd) {
return;
}
int par_sz = pars.size();
fwrite(&par_sz, sizeof(int), 1, fd);
for (const auto &p : pars)
write_parameters(&p, fd);
}
void read_parameter_list(FILE *fd) {
if (!fd) {
return;
}
opt_pars = std::set<Mrpt_Parameters,decltype(is_faster)*>(is_faster);
int par_sz = 0;
fread(&par_sz, sizeof(int), 1, fd);
for (int i = 0; i < par_sz; ++i) {
Mrpt_Parameters p;
read_parameters(&p, fd);
opt_pars.insert(p);
}
}
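/* Returns the fastest parameter combination on the Pareto frontier whose estimated
recall reaches target_recall (within epsilon); if none reaches it, falls back to the
highest-recall (and slowest) combination available. */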
Mrpt_Parameters parameters(double target_recall) const {
double tr = target_recall - epsilon;
for (const auto &p : opt_pars) {
if (p.estimated_recall > tr) {
return p;
}
}
if (!opt_pars.empty()) {
return *(opt_pars.rbegin());
}
return Mrpt_Parameters();
}
/**
* Computes the leaf sizes of a tree assuming a median split and that,
* when the number of points is odd, the extra point is always assigned
* to the left branch.
*/
static void count_leaf_sizes(int n, int level, int tree_depth, std::vector<int> &out_leaf_sizes) {
if (level == tree_depth) {
out_leaf_sizes.push_back(n);
return;
}
count_leaf_sizes(n - n / 2, level + 1, tree_depth, out_leaf_sizes);
count_leaf_sizes(n / 2, level + 1, tree_depth, out_leaf_sizes);
}
/**
* Computes the indices of the first elements of the leaves in a vector containing
* all the leaves of a tree concatenated. Assumes that a median split is used
* and that, when the number of points is odd, the extra point is always assigned
* to the left branch.
*/
static void count_first_leaf_indices(std::vector<int> &indices, int n, int depth) {
std::vector<int> leaf_sizes;
count_leaf_sizes(n, 0, depth, leaf_sizes);
indices = std::vector<int>(leaf_sizes.size() + 1);
indices[0] = 0;
for (int i = 0; i < (int) leaf_sizes.size(); ++i)
indices[i + 1] = indices[i] + leaf_sizes[i];
}
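/* Worked example (editor's note): n = 5, depth = 2 yields leaf sizes {2, 1, 1, 1} (the
left branch always receives the extra point), hence first-leaf indices {0, 2, 3, 4, 5};
leaf j occupies the half-open range [indices[j], indices[j+1]). */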
static void count_first_leaf_indices_all(std::vector<std::vector<int>> &indices, int n, int depth_max) {
for (int d = 0; d <= depth_max; ++d) {
std::vector<int> idx;
count_first_leaf_indices(idx, n, d);
indices.push_back(idx);
}
}
static double predict_theil_sen(double x, std::pair<double,double> beta) {
return beta.first + beta.second * x;
}
double get_candidate_set_size(int tree, int depth, int v) const {
return cs_sizes[depth - depth_min](v - 1, tree - 1);
}
double get_projection_time(int n_trees, int depth, int v) const {
return predict_theil_sen(n_trees * depth, beta_projection);
}
double get_voting_time(int n_trees, int depth, int v) const {
const std::map<int,std::pair<double,double>> &beta = beta_voting[depth - depth_min];
if (v <= 0 || beta.empty()) {
return 0.0;
}
for (const auto &b : beta) {
if (v <= b.first) {
return predict_theil_sen(n_trees, b.second);
}
}
return predict_theil_sen(n_trees, beta.rbegin()->second);
}
double get_exact_time(int n_trees, int depth, int v) const {
return predict_theil_sen(get_candidate_set_size(n_trees, depth, v), beta_exact);
}
double get_query_time(int tree, int depth, int v) const {
return get_projection_time(tree, depth, v)
+ get_voting_time(tree, depth, v)
+ get_exact_time(tree, depth, v);
}
std::vector<int> sample_indices(int n_test, int seed = 0) const {
std::random_device rd;
int s = seed ? seed : rd();
std::mt19937 gen(s);
std::vector<int> indices_data(n_samples);
std::iota(indices_data.begin(), indices_data.end(), 0);
std::shuffle(indices_data.begin(), indices_data.end(), gen);
return std::vector<int>(indices_data.begin(), indices_data.begin() + n_test);
}
Eigen::MatrixXf subset(const std::vector<int> &indices) const {
int n_test = indices.size();
Eigen::MatrixXf Q = Eigen::MatrixXf(dim, n_test);
for(int i = 0; i < n_test; ++i)
Q.col(i) = X.col(indices[i]);
return Q;
}
const Eigen::Map<const Eigen::MatrixXf> X; // the data matrix
Eigen::MatrixXf split_points; // all split points in all trees
std::vector<std::vector<int>> tree_leaves; // contains all leaves of all trees
Eigen::Matrix<float, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor> dense_random_matrix; // random vectors needed for all the RP-trees
Eigen::SparseMatrix<float, Eigen::RowMajor> sparse_random_matrix; // random vectors needed for all the RP-trees
std::vector<std::vector<int>> leaf_first_indices_all; // first indices for each level
std::vector<int> leaf_first_indices; // first indices of each leaf of tree in tree_leaves
const int n_samples; // sample size of data
const int dim; // dimension of data
Mrpt_Parameters par;
int n_trees = 0; // number of RP-trees
int depth = 0; // depth of an RP-tree with median split
float density = -1.0; // expected ratio of non-zero components in a projection matrix
int n_pool = 0; // amount of random vectors needed for all the RP-trees
int n_array = 0; // length of the one RP-tree as array
int votes = 0; // optimal number of votes to use
int k = 0;
enum itype {normal, autotuned, autotuned_unpruned};
itype index_type = normal;
// Member variables used in autotuning:
int depth_min = 0;
int votes_max = 0;
const double epsilon = 0.0001; // error bound for comparisons of recall levels
std::vector<Eigen::MatrixXd> cs_sizes;
std::pair<double,double> beta_projection, beta_exact;
std::vector<std::map<int,std::pair<double,double>>> beta_voting;
std::set<Mrpt_Parameters,decltype(is_faster)*> opt_pars;
};
#endif // CPP_MRPT_H_
|
task-taskgroup.c | /*
* task-taskgroup.c -- Archer testcase
*/
//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
//
// See tools/archer/LICENSE.txt for details.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
// RUN: %libarcher-compile-and-run | FileCheck %s
#include <omp.h>
#include <stdio.h>
#include <unistd.h>
#include "ompt/ompt-signal.h"
int main(int argc, char *argv[]) {
int var = 0, a = 0;
#pragma omp parallel num_threads(2) shared(var, a)
#pragma omp master
{
#pragma omp taskgroup
{
#pragma omp task shared(var, a)
{
var++;
OMPT_SIGNAL(a);
}
// Give other thread time to steal the task.
OMPT_WAIT(a, 1);
}
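// Editor's note: the implicit synchronization at the end of the taskgroup guarantees
// that the child task's increment has completed, so the increment below cannot race
// with it; this is why the CHECK lines expect no data race report.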
var++;
}
fprintf(stderr, "DONE\n");
int error = (var != 2);
return error;
}
// CHECK-NOT: ThreadSanitizer: data race
// CHECK-NOT: ThreadSanitizer: reported
// CHECK: DONE
|
GB_AxB_saxpy3_cumsum.c | //------------------------------------------------------------------------------
// GB_AxB_saxpy3_cumsum: finalize nnz(C(:,j)) and find cumulative sum of Cp
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// phase3: fine tasks finalize their computation nnz(C(:,j))
// phase4: cumulative sum of C->p
#include "GB_AxB_saxpy3.h"
int64_t GB_AxB_saxpy3_cumsum // return cjnz_max for fine tasks
(
GrB_Matrix C, // finalize C->p
GB_saxpy3task_struct *TaskList, // list of tasks, and workspace
int nfine, // number of fine tasks
double chunk, // chunk size
int nthreads // number of threads
)
{
//--------------------------------------------------------------------------
// get C
//--------------------------------------------------------------------------
int64_t *GB_RESTRICT Cp = C->p ;
const int64_t cvlen = C->vlen ;
const int64_t cnvec = C->nvec ;
//==========================================================================
// phase3: count nnz(C(:,j)) for fine tasks
//==========================================================================
int taskid ;
#pragma omp parallel for num_threads(nthreads) schedule(dynamic,1)
for (taskid = 0 ; taskid < nfine ; taskid++)
{
//----------------------------------------------------------------------
// get the task descriptor
//----------------------------------------------------------------------
// int64_t kk = TaskList [taskid].vector ;
int64_t hash_size = TaskList [taskid].hsize ;
bool use_Gustavson = (hash_size == cvlen) ;
int team_size = TaskList [taskid].team_size ;
int master = TaskList [taskid].master ;
int my_teamid = taskid - master ;
int64_t my_cjnz = 0 ;
if (use_Gustavson)
{
//------------------------------------------------------------------
// phase3: fine Gustavson task, C=A*B, C<M>=A*B, or C<!M>=A*B
//------------------------------------------------------------------
// Hf [i] == 2 if C(i,j) is an entry in C(:,j)
int8_t *GB_RESTRICT Hf = (int8_t *GB_RESTRICT) TaskList [taskid].Hf;
int64_t istart, iend ;
GB_PARTITION (istart, iend, cvlen, my_teamid, team_size) ;
for (int64_t i = istart ; i < iend ; i++)
{
if (Hf [i] == 2)
{
my_cjnz++ ;
}
}
}
else
{
//------------------------------------------------------------------
// phase3: fine hash task, C=A*B, C<M>=A*B, or C<!M>=A*B
//------------------------------------------------------------------
// (Hf [hash] & 3) == 2 if C(i,j) is an entry in C(:,j),
// and the index i of the entry is (Hf [hash] >> 2) - 1.
int64_t *GB_RESTRICT
Hf = (int64_t *GB_RESTRICT) TaskList [taskid].Hf ;
int64_t mystart, myend ;
GB_PARTITION (mystart, myend, hash_size, my_teamid, team_size) ;
for (int64_t hash = mystart ; hash < myend ; hash++)
{
if ((Hf [hash] & 3) == 2)
{
my_cjnz++ ;
}
}
}
TaskList [taskid].my_cjnz = my_cjnz ; // count my nnz(C(:,j))
}
//==========================================================================
// phase4: compute Cp with cumulative sum
//==========================================================================
// TaskList [taskid].my_cjnz is the # of unique entries found in C(:,j) by
// that task. Sum these terms to compute total # of entries in C(:,j).
for (taskid = 0 ; taskid < nfine ; taskid++)
{
int64_t kk = TaskList [taskid].vector ;
Cp [kk] = 0 ;
}
for (taskid = 0 ; taskid < nfine ; taskid++)
{
int64_t kk = TaskList [taskid].vector ;
int64_t my_cjnz = TaskList [taskid].my_cjnz ;
Cp [kk] += my_cjnz ;
ASSERT (my_cjnz <= cvlen) ;
}
// Cp [kk] is now nnz (C (:,j)), for all vectors j, whether computed by
// fine tasks or coarse tasks, and where j == (Bh == NULL) ? kk : Bh [kk].
int nth = GB_nthreads (cnvec, chunk, nthreads) ;
GB_cumsum (Cp, cnvec, &(C->nvec_nonempty), nth) ;
// cumulative sum of nnz (C (:,j)) for each team of fine tasks
int64_t cjnz_sum = 0 ;
int64_t cjnz_max = 0 ;
for (taskid = 0 ; taskid < nfine ; taskid++)
{
if (taskid == TaskList [taskid].master)
{
cjnz_sum = 0 ;
// also find the max (C (:,j)) for any fine hash tasks
int64_t hash_size = TaskList [taskid].hsize ;
bool use_Gustavson = (hash_size == cvlen) ;
if (!use_Gustavson)
{
int64_t kk = TaskList [taskid].vector ;
int64_t cjnz = Cp [kk+1] - Cp [kk] ;
cjnz_max = GB_IMAX (cjnz_max, cjnz) ;
}
}
int64_t my_cjnz = TaskList [taskid].my_cjnz ;
TaskList [taskid].my_cjnz = cjnz_sum ;
cjnz_sum += my_cjnz ;
}
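// Editor's note: each task's my_cjnz now holds its exclusive starting offset within its
// team's vector C(:,j); e.g. per-task counts {3, 2, 4} in one team become offsets
// {0, 3, 5}, so the tasks of a team can later gather their entries of C(:,j) without
// overlapping.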
//--------------------------------------------------------------------------
// return result
//--------------------------------------------------------------------------
return (cjnz_max) ;
}
|
NQueen-Paralelo.c | /*Final Project for Algorithms
Sebastian Gonzalo Vives Faus - A01025211
Sergio Hernandez Castillo - A01025210
Description: N-Queens problem, solved in parallel
*/
#include <omp.h>
#include <stdio.h>
#include <stdbool.h>
#include <stdlib.h> //for atoi() and abs()
//Global variables
int n = 0; //Board size
int soluciones = 0; //Number of solutions found
bool imprimir = false; //Whether to print the solution boards
//Method for printing
void print(int arr[]){
printf("Solution %d \n", soluciones);
for(int i =0; i<n;i++){
for(int j = 0; j<n; j++){
if(arr[i] == j){ //If the value in this row matches this column (there is a queen)
printf("Q ");
}//Close if
else{
printf("0 ");
}//Close else
}//Close for
printf("\n");
}//Close for
}//Close print()
//Helper method for Reinas
void InsertarReinas(int arr[], int row, int col, int t_id){
/*We have to check, for each row, whether we can insert a queen.
To satisfy the rule, no two queens may attack each other (following the rules of chess),
so we have to check vertical and diagonal attacks.*/
for(int i = 0; i < row; i++){
//Vertical attacks
if (arr[i] == col){ //If a queen is already in the same column, bail out
//printf("V Ouch! in row %d",row);
return;
}//Close if
//Diagonal attacks (we use the diagonal formula)
if((abs(arr[i] - col) == (row-i))){
//printf("Q Ouch! in row %d",row);
return;
}//Close if
}//Close for
/*If neither of those attacks occurs, we can assume it is safe to place the queen in this
column of the current row.*/
arr[row] = col;
//Check whether there are more rows or we have reached the final row
if(row == n-1){//If we reached the last row and placed a queen, we can assume the board is a solution.
#pragma omp atomic
soluciones++; //Add 1 to the solution count
//Here we print the board, if the user chose to print it
if(imprimir == true){
#pragma omp critical
{
/*If the user decided to print the solutions, print the solution that was found, along
with the thread that found it.*/
printf("Thread %d found a solution! \n",t_id);
print(arr);
}//Close pragma
}//Close if
}//Close if
/*If rows still remain, go on to the next row, trying to find a solution by placing a
queen in each column of that row.*/
else{
for(int i = 0; i<n; i++){
InsertarReinas(arr, row+1, i,t_id);
}//Close for
}//Close else
}//Close InsertarReinas()
//Main method
void Reinas(){
int t_id; //Variable to get each thread's ID
int i;
//HERE WE MODIFY THE SCHEDULE TYPE. This is where we implement our parallel for.
#pragma omp parallel for schedule(guided,1) private(t_id)
for (i = 0; i<n; i++){
t_id = omp_get_thread_num(); //Get the thread's id
//We will try placing a queen in each column of the first row.
//printf("Searching for a solution on the board where the queen starts at row 0 and column %d \n", i);
int arr[n]; //Unlike the sequential version, we assign a board to each thread.
//Where: arr (array/board), 0 (row 0 where we will try each queen), i (column where we place the first queen.)
InsertarReinas(arr, 0, i, t_id);
}//Close for
}//Close Reinas()
//Main
int main(int argc, const char* argv[]){
//Initialization when executing the program ./a.out
if(argc < 4){
printf("Error!: Run as ./a.out <board size> <number of threads> <0/1 to print the boards or not> \n");
return 0;
}//Close if
//Assign the board size
n = atoi(argv[1]); //Where argv[1] (a string) is the number the user supplies when executing the program
//Get the number of threads
int threads = atoi(argv[2]);
omp_set_num_threads(threads);
//Whether or not to print the solutions.
int option = atoi(argv[3]);
if(option == 1){
imprimir = true;
}//Close if
//Call the method
double start = omp_get_wtime();
Reinas();
double finish = omp_get_wtime();
printf("Solutions for a %d x %d board: %d, with an execution time of: %f \n",n,n,soluciones, finish-start);
}//Close main() |
GB_unaryop__identity_fp64_uint8.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__identity_fp64_uint8
// op(A') function: GB_tran__identity_fp64_uint8
// C type: double
// A type: uint8_t
// cast: double cij = (double) aij
// unaryop: cij = aij
#define GB_ATYPE \
uint8_t
#define GB_CTYPE \
double
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint8_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CASTING(z, aij) \
double z = (double) aij ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (z, aij) ; \
GB_OP (GB_CX (pC), z) ; \
}
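// For reference (editor's note), GB_CAST_OP (p,p) therefore expands to:
// { uint8_t aij = Ax [p] ; double z = (double) aij ; Cx [p] = z ; }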
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_FP64 || GxB_NO_UINT8)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__identity_fp64_uint8
(
double *Cx, // Cx and Ax may be aliased
uint8_t *Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__identity_fp64_uint8
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
distort.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% DDDD IIIII SSSSS TTTTT OOO RRRR TTTTT %
% D D I SS T O O R R T %
% D D I SSS T O O RRRR T %
% D D I SS T O O R R T %
% DDDD IIIII SSSSS T OOO R R T %
% %
% %
% MagickCore Image Distortion Methods %
% %
% Software Design %
% Cristy %
% Anthony Thyssen %
% June 2007 %
% %
% %
% Copyright 1999-2021 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/artifact.h"
#include "magick/cache.h"
#include "magick/cache-view.h"
#include "magick/channel.h"
#include "magick/color-private.h"
#include "magick/colorspace.h"
#include "magick/colorspace-private.h"
#include "magick/composite-private.h"
#include "magick/distort.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/gem.h"
#include "magick/hashmap.h"
#include "magick/image.h"
#include "magick/list.h"
#include "magick/matrix.h"
#include "magick/memory_.h"
#include "magick/monitor-private.h"
#include "magick/option.h"
#include "magick/pixel.h"
#include "magick/pixel-accessor.h"
#include "magick/pixel-private.h"
#include "magick/resample.h"
#include "magick/resample-private.h"
#include "magick/registry.h"
#include "magick/resource_.h"
#include "magick/semaphore.h"
#include "magick/shear.h"
#include "magick/string_.h"
#include "magick/string-private.h"
#include "magick/thread-private.h"
#include "magick/token.h"
#include "magick/transform.h"
/*
Numerous internal routines for image distortions.
*/
static inline void AffineArgsToCoefficients(double *affine)
{
/* map external sx,ry,rx,sy,tx,ty to internal c0,c2,c4,c1,c3,c5 */
double tmp[4]; /* note indexes 0 and 5 remain unchanged */
tmp[0]=affine[1]; tmp[1]=affine[2]; tmp[2]=affine[3]; tmp[3]=affine[4];
affine[3]=tmp[0]; affine[1]=tmp[1]; affine[4]=tmp[2]; affine[2]=tmp[3];
}
static inline void CoefficientsToAffineArgs(double *coeff)
{
/* map internal c0,c1,c2,c3,c4,c5 to external sx,ry,rx,sy,tx,ty */
double tmp[4]; /* note indexes 0 and 5 remain unchanged */
tmp[0]=coeff[3]; tmp[1]=coeff[1]; tmp[2]=coeff[4]; tmp[3]=coeff[2];
coeff[1]=tmp[0]; coeff[2]=tmp[1]; coeff[3]=tmp[2]; coeff[4]=tmp[3];
}
static void InvertAffineCoefficients(const double *coeff,double *inverse)
{
/* From "Digital Image Warping" by George Wolberg, page 50 */
double determinant;
determinant=PerceptibleReciprocal(coeff[0]*coeff[4]-coeff[1]*coeff[3]);
inverse[0]=determinant*coeff[4];
inverse[1]=determinant*(-coeff[1]);
inverse[2]=determinant*(coeff[1]*coeff[5]-coeff[2]*coeff[4]);
inverse[3]=determinant*(-coeff[3]);
inverse[4]=determinant*coeff[0];
inverse[5]=determinant*(coeff[2]*coeff[3]-coeff[0]*coeff[5]);
}
static void InvertPerspectiveCoefficients(const double *coeff,
double *inverse)
{
/* From "Digital Image Warping" by George Wolberg, page 53 */
double determinant;
determinant=PerceptibleReciprocal(coeff[0]*coeff[4]-coeff[3]*coeff[1]);
inverse[0]=determinant*(coeff[4]-coeff[7]*coeff[5]);
inverse[1]=determinant*(coeff[7]*coeff[2]-coeff[1]);
inverse[2]=determinant*(coeff[1]*coeff[5]-coeff[4]*coeff[2]);
inverse[3]=determinant*(coeff[6]*coeff[5]-coeff[3]);
inverse[4]=determinant*(coeff[0]-coeff[6]*coeff[2]);
inverse[5]=determinant*(coeff[3]*coeff[2]-coeff[0]*coeff[5]);
inverse[6]=determinant*(coeff[3]*coeff[7]-coeff[6]*coeff[4]);
inverse[7]=determinant*(coeff[6]*coeff[1]-coeff[0]*coeff[7]);
}
/*
* Polynomial Term Defining Functions
*
* Order must either be an integer, or 1.5 to produce
* the 2-dimensional bilinear polynomial function...
* affine 1 (3) u = c0 + c1*x + c2*y
* bilinear 1.5 (4) u = '' + c3*x*y
* quadratic 2 (6) u = '' + c4*x*x + c5*y*y
* cubic 3 (10) u = '' + c6*x^3 + c7*x*x*y + c8*x*y*y + c9*y^3
* quartic 4 (15) u = '' + c10*x^4 + ... + c14*y^4
* quintic 5 (21) u = '' + c15*x^5 + ... + c20*y^5
* The number in parentheses is the minimum number of points needed.
* Anything beyond quintic has not been implemented until
* a more automated way of determining terms is found.
* Note the slight re-ordering of the terms for a quadratic polynomial
* which is to allow the use of a bi-linear (order=1.5) polynomial.
* All the later polynomials are ordered simply from x^N to y^N
*/
static size_t poly_number_terms(double order)
{
/* Return the number of terms for a 2d polynomial */
if ( order < 1 || order > 5 ||
( order != floor(order) && (order-1.5) > MagickEpsilon) )
return 0; /* invalid polynomial order */
return((size_t) floor((order+1)*(order+2)/2));
}
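/* Sanity check (editor's note): (order+1)*(order+2)/2 reproduces the table above:
order 1 -> 3, order 1.5 -> floor(4.375) = 4, order 2 -> 6, order 3 -> 10,
order 4 -> 15, order 5 -> 21. */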
static double poly_basis_fn(ssize_t n, double x, double y)
{
/* Return the result for this polynomial term */
switch(n) {
case 0: return( 1.0 ); /* constant */
case 1: return( x );
case 2: return( y ); /* affine order = 1 terms = 3 */
case 3: return( x*y ); /* bilinear order = 1.5 terms = 4 */
case 4: return( x*x );
case 5: return( y*y ); /* quadratic order = 2 terms = 6 */
case 6: return( x*x*x );
case 7: return( x*x*y );
case 8: return( x*y*y );
case 9: return( y*y*y ); /* cubic order = 3 terms = 10 */
case 10: return( x*x*x*x );
case 11: return( x*x*x*y );
case 12: return( x*x*y*y );
case 13: return( x*y*y*y );
case 14: return( y*y*y*y ); /* quartic order = 4 terms = 15 */
case 15: return( x*x*x*x*x );
case 16: return( x*x*x*x*y );
case 17: return( x*x*x*y*y );
case 18: return( x*x*y*y*y );
case 19: return( x*y*y*y*y );
case 20: return( y*y*y*y*y ); /* quintic order = 5 terms = 21 */
}
return( 0 ); /* should never happen */
}
static const char *poly_basis_str(ssize_t n)
{
/* return the result for this polynomial term */
switch(n) {
case 0: return(""); /* constant */
case 1: return("*ii");
case 2: return("*jj"); /* affine order = 1 terms = 3 */
case 3: return("*ii*jj"); /* bilinear order = 1.5 terms = 4 */
case 4: return("*ii*ii");
case 5: return("*jj*jj"); /* quadratic order = 2 terms = 6 */
case 6: return("*ii*ii*ii");
case 7: return("*ii*ii*jj");
case 8: return("*ii*jj*jj");
case 9: return("*jj*jj*jj"); /* cubic order = 3 terms = 10 */
case 10: return("*ii*ii*ii*ii");
case 11: return("*ii*ii*ii*jj");
case 12: return("*ii*ii*jj*jj");
case 13: return("*ii*jj*jj*jj");
case 14: return("*jj*jj*jj*jj"); /* quartic order = 4 terms = 15 */
case 15: return("*ii*ii*ii*ii*ii");
case 16: return("*ii*ii*ii*ii*jj");
case 17: return("*ii*ii*ii*jj*jj");
case 18: return("*ii*ii*jj*jj*jj");
case 19: return("*ii*jj*jj*jj*jj");
case 20: return("*jj*jj*jj*jj*jj"); /* quintic order = 5 terms = 21 */
}
return( "UNKNOWN" ); /* should never happen */
}
static double poly_basis_dx(ssize_t n, double x, double y)
{
/* polynomial term for x derivative */
switch(n) {
case 0: return( 0.0 ); /* constant */
case 1: return( 1.0 );
case 2: return( 0.0 ); /* affine order = 1 terms = 3 */
case 3: return( y ); /* bilinear order = 1.5 terms = 4 */
case 4: return( x );
case 5: return( 0.0 ); /* quadratic order = 2 terms = 6 */
case 6: return( x*x );
case 7: return( x*y );
case 8: return( y*y );
case 9: return( 0.0 ); /* cubic order = 3 terms = 10 */
case 10: return( x*x*x );
case 11: return( x*x*y );
case 12: return( x*y*y );
case 13: return( y*y*y );
case 14: return( 0.0 ); /* quartic order = 4 terms = 15 */
case 15: return( x*x*x*x );
case 16: return( x*x*x*y );
case 17: return( x*x*y*y );
case 18: return( x*y*y*y );
case 19: return( y*y*y*y );
case 20: return( 0.0 ); /* quintic order = 5 terms = 21 */
}
return( 0.0 ); /* should never happen */
}
static double poly_basis_dy(ssize_t n, double x, double y)
{
/* polynomial term for y derivative */
switch(n) {
case 0: return( 0.0 ); /* constant */
case 1: return( 0.0 );
case 2: return( 1.0 ); /* affine order = 1 terms = 3 */
case 3: return( x ); /* bilinear order = 1.5 terms = 4 */
case 4: return( 0.0 );
case 5: return( y ); /* quadratic order = 2 terms = 6 */
default: return( poly_basis_dx(n-1,x,y) ); /* weird but true */
}
/* NOTE: the only reason that last is not true for 'quadratic'
is due to the re-arrangement of terms to allow for 'bilinear'
*/
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A f f i n e T r a n s f o r m I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AffineTransformImage() transforms an image as dictated by the affine matrix.
% It allocates the memory necessary for the new Image structure and returns
% a pointer to the new image.
%
% The format of the AffineTransformImage method is:
%
% Image *AffineTransformImage(const Image *image,
% AffineMatrix *affine_matrix,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o affine_matrix: the affine matrix.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *AffineTransformImage(const Image *image,
const AffineMatrix *affine_matrix,ExceptionInfo *exception)
{
double
distort[6];
Image
*deskew_image;
/*
Affine transform image.
*/
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(affine_matrix != (AffineMatrix *) NULL);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
distort[0]=affine_matrix->sx;
distort[1]=affine_matrix->rx;
distort[2]=affine_matrix->ry;
distort[3]=affine_matrix->sy;
distort[4]=affine_matrix->tx;
distort[5]=affine_matrix->ty;
deskew_image=DistortImage(image,AffineProjectionDistortion,6,distort,
MagickTrue,exception);
return(deskew_image);
}
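/*
Usage sketch (editor's note; a minimal illustration assuming the standard MagickCore
helpers GetAffineMatrix() and DegreesToRadians() are available, not part of this
file's API): rotate an image 30 degrees via a forward-mapped affine matrix.

static Image *RotateImage30(const Image *image,ExceptionInfo *exception)
{
AffineMatrix affine;
double theta = DegreesToRadians(30.0);
GetAffineMatrix(&affine); // start from the identity (sx=sy=1, rx=ry=tx=ty=0)
affine.sx=cos(theta); affine.rx=sin(theta); // forward mapping: u = sx*x + ry*y + tx
affine.ry=(-sin(theta)); affine.sy=cos(theta); // v = rx*x + sy*y + ty
return(AffineTransformImage(image,&affine,exception));
}
*/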
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e n e r a t e C o e f f i c i e n t s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GenerateCoefficients() takes user provided input arguments and generates
% the coefficients, needed to apply the specific distortion for either
% distorting images (generally using control points) or generating a color
% gradient from sparsely separated color points.
%
% The format of the GenerateCoefficients() method is:
%
% Image *GenerateCoefficients(const Image *image,DistortImageMethod method,
% const size_t number_arguments,const double *arguments,
% size_t number_values, ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image to be distorted.
%
% o method: the method of image distortion/ sparse gradient
%
% o number_arguments: the number of arguments given.
%
% o arguments: the arguments for this distortion method.
%
% o number_values: the style and format of given control points, (caller type)
% 0: 2 dimensional mapping of control points (Distort)
% Format: u,v,x,y where u,v is the 'source' of the
% the color to be plotted, for DistortImage()
% N: Interpolation of control points with N values (usually r,g,b)
% Format: x,y,r,g,b mapping x,y to color values r,g,b
% IN future, variable number of values may be given (1 to N)
%
% o exception: return any errors or warnings in this structure
%
% Note that the returned array of double values must be freed by the
% calling method using RelinquishMagickMemory(). This however may change in
% the future to require a more 'method' specific approach.
%
% Because of this, this method should not be classed as stable or used
% outside other MagickCore library methods.
*/
static inline double MagickRound(double x)
{
/*
Round the fraction to nearest integer.
*/
if ((x-floor(x)) < (ceil(x)-x))
return(floor(x));
return(ceil(x));
}
static double *GenerateCoefficients(const Image *image,
DistortImageMethod *method,const size_t number_arguments,
const double *arguments,size_t number_values,ExceptionInfo *exception)
{
double
*coeff;
size_t
i;
size_t
number_coeff, /* number of coefficients to return (array size) */
cp_size, /* number floating point numbers per control point */
cp_x,cp_y, /* the x,y indexes for control point */
cp_values; /* index of values for this control point */
/* number_values Number of values given per control point */
if ( number_values == 0 ) {
/* Image distortion using control points (or other distortion)
That is generate a mapping so that x,y->u,v given u,v,x,y
*/
number_values = 2; /* special case: two values of u,v */
cp_values = 0; /* the values i,j are BEFORE the destination CP x,y */
cp_x = 2; /* location of x,y in input control values */
cp_y = 3;
/* NOTE: cp_values, also used for later 'reverse map distort' tests */
}
else {
cp_x = 0; /* location of x,y in input control values */
cp_y = 1;
cp_values = 2; /* and the other values are after x,y */
/* Typically in this case the values are R,G,B color values */
}
cp_size = number_values+2; /* each CP definition involves this many numbers */
/* If not enough control point pairs are found for specific distortions
fall back to Affine distortion (allowing 0 to 3 point pairs)
*/
if ( number_arguments < 4*cp_size &&
( *method == BilinearForwardDistortion
|| *method == BilinearReverseDistortion
|| *method == PerspectiveDistortion
) )
*method = AffineDistortion;
number_coeff=0;
switch (*method) {
case AffineDistortion:
/* also BarycentricColorInterpolate: */
number_coeff=3*number_values;
break;
case PolynomialDistortion:
/* the number of coefficients depends on the given polynomial 'order' */
i = poly_number_terms(arguments[0]);
number_coeff = 2 + i*number_values;
if ( i == 0 ) {
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : '%s'","Polynomial",
"Invalid order, should be interger 1 to 5, or 1.5");
return((double *) NULL);
}
if ( number_arguments < 1+i*cp_size ) {
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument", "%s : 'require at least %.20g CPs'",
"Polynomial", (double) i);
return((double *) NULL);
}
break;
case BilinearReverseDistortion:
number_coeff=4*number_values;
break;
/*
The rest are constants as they are only used for image distorts
*/
case BilinearForwardDistortion:
number_coeff=10; /* 2*4 coeff plus 2 constants */
cp_x = 0; /* Reverse src/dest coords for forward mapping */
cp_y = 1;
cp_values = 2;
break;
#if 0
case QuadraterialDistortion:
number_coeff=19; /* BilinearForward + BilinearReverse */
#endif
break;
case ShepardsDistortion:
number_coeff=1; /* The power factor to use */
break;
case ArcDistortion:
number_coeff=5;
break;
case ScaleRotateTranslateDistortion:
case AffineProjectionDistortion:
case Plane2CylinderDistortion:
case Cylinder2PlaneDistortion:
number_coeff=6;
break;
case PolarDistortion:
case DePolarDistortion:
number_coeff=8;
break;
case PerspectiveDistortion:
case PerspectiveProjectionDistortion:
number_coeff=9;
break;
case BarrelDistortion:
case BarrelInverseDistortion:
number_coeff=10;
break;
default:
perror("unknown method given"); /* just fail assertion */
}
/* allocate the array of coefficients needed */
coeff = (double *) AcquireQuantumMemory(number_coeff,sizeof(*coeff));
if (coeff == (double *) NULL) {
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed",
"%s", "GenerateCoefficients");
return((double *) NULL);
}
/* zero out coefficients array */
for (i=0; i < number_coeff; i++)
coeff[i] = 0.0;
switch (*method)
{
case AffineDistortion:
{
/* Affine Distortion
v = c0*x + c1*y + c2
for each 'value' given
Input Arguments are sets of control points...
For Distort Images u,v, x,y ...
For Sparse Gradients x,y, r,g,b ...
*/
if ( number_arguments%cp_size != 0 ||
number_arguments < cp_size ) {
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument", "%s : 'require at least %.20g CPs'",
"Affine", 1.0);
coeff=(double *) RelinquishMagickMemory(coeff);
return((double *) NULL);
}
/* handle special cases of not enough arguments */
if ( number_arguments == cp_size ) {
/* Only 1 CP Set Given */
if ( cp_values == 0 ) {
/* image distortion - translate the image */
coeff[0] = 1.0;
coeff[2] = arguments[0] - arguments[2];
coeff[4] = 1.0;
coeff[5] = arguments[1] - arguments[3];
}
else {
/* sparse gradient - use the values directly */
for (i=0; i<number_values; i++)
coeff[i*3+2] = arguments[cp_values+i];
}
}
else {
/* 2 or more points (usually 3) given.
Solve a least squares simultaneous equation for coefficients.
*/
double
**matrix,
**vectors,
terms[3];
MagickBooleanType
status;
/* create matrix, and a fake vectors matrix */
matrix = AcquireMagickMatrix(3UL,3UL);
vectors = (double **) AcquireQuantumMemory(number_values,sizeof(*vectors));
if (matrix == (double **) NULL || vectors == (double **) NULL)
{
matrix = RelinquishMagickMatrix(matrix, 3UL);
vectors = (double **) RelinquishMagickMemory(vectors);
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed",
"%s", "DistortCoefficients");
return((double *) NULL);
}
/* fake a number_values x3 vectors matrix from coefficients array */
for (i=0; i < number_values; i++)
vectors[i] = &(coeff[i*3]);
/* Add given control point pairs for least squares solving */
for (i=0; i < number_arguments; i+=cp_size) {
terms[0] = arguments[i+cp_x]; /* x */
terms[1] = arguments[i+cp_y]; /* y */
terms[2] = 1; /* 1 */
LeastSquaresAddTerms(matrix,vectors,terms,
&(arguments[i+cp_values]),3UL,number_values);
}
if ( number_arguments == 2*cp_size ) {
/* Only two pairs were given, but we need 3 to solve the affine.
Fake extra coordinates by rotating p1 around p0 by 90 degrees.
x2 = x0 - (y1-y0) y2 = y0 + (x1-x0)
*/
terms[0] = arguments[cp_x]
- ( arguments[cp_size+cp_y] - arguments[cp_y] ); /* x2 */
terms[1] = arguments[cp_y]
+ ( arguments[cp_size+cp_x] - arguments[cp_x] ); /* y2 */
terms[2] = 1; /* 1 */
if ( cp_values == 0 ) {
/* Image Distortion - rotate the u,v coordinates too */
double
uv2[2];
uv2[0] = arguments[0] - arguments[5] + arguments[1]; /* u2 */
uv2[1] = arguments[1] + arguments[4] - arguments[0]; /* v2 */
LeastSquaresAddTerms(matrix,vectors,terms,uv2,3UL,2UL);
}
else {
/* Sparse Gradient - use values of p0 for linear gradient */
LeastSquaresAddTerms(matrix,vectors,terms,
&(arguments[cp_values]),3UL,number_values);
}
}
/* Solve for LeastSquares Coefficients */
status=GaussJordanElimination(matrix,vectors,3UL,number_values);
matrix = RelinquishMagickMatrix(matrix, 3UL);
vectors = (double **) RelinquishMagickMemory(vectors);
if ( status == MagickFalse ) {
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : 'Unsolvable Matrix'",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
return((double *) NULL);
}
}
return(coeff);
}
case AffineProjectionDistortion:
{
/*
Arguments: Affine Matrix (forward mapping)
Arguments sx, rx, ry, sy, tx, ty
Where u = sx*x + ry*y + tx
v = rx*x + sy*y + ty
Returns coefficients (in their inverse form) ordered as...
sx ry tx rx sy ty
AffineProjection Distortion Notes...
+ Will only work with number_values == 2 (2-dimensional Image Distortion)
+ Can not be used for generating a sparse gradient (interpolation)
*/
double inverse[8];
if (number_arguments != 6) {
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : 'Needs 6 coeff values'",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
return((double *) NULL);
}
/* FUTURE: trap test for sx*sy-rx*ry == 0 (determinant = 0, no inverse) */
for(i=0; i<6UL; i++ )
inverse[i] = arguments[i];
AffineArgsToCoefficients(inverse); /* map into coefficents */
InvertAffineCoefficients(inverse, coeff); /* invert */
*method = AffineDistortion;
return(coeff);
}
case ScaleRotateTranslateDistortion:
{
/* Scale, Rotate and Translate Distortion
An alternative Affine Distortion
Argument options, by number of arguments given:
7: x,y, sx,sy, a, nx,ny
6: x,y, s, a, nx,ny
5: x,y, sx,sy, a
4: x,y, s, a
3: x,y, a
2: s, a
1: a
Where actions are (in order of application)
x,y 'center' of transforms (default = image center)
sx,sy scale image by this amount (default = 1)
a angle of rotation (argument required)
nx,ny move 'center' here (default = x,y or no movement)
And convert to affine mapping coefficients
ScaleRotateTranslate Distortion Notes...
+ Does not use a set of CPs in any normal way
+ Will only work with a 2-dimensional Image Distortion
+ Cannot be used for generating a sparse gradient (interpolation)
*/
double
cosine, sine,
x,y,sx,sy,a,nx,ny;
/* set default center, and default scale */
x = nx = (double)(image->columns)/2.0 + (double)image->page.x;
y = ny = (double)(image->rows)/2.0 + (double)image->page.y;
sx = sy = 1.0;
switch ( number_arguments ) {
case 0:
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : 'Needs at least 1 argument'",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
return((double *) NULL);
case 1:
a = arguments[0];
break;
case 2:
sx = sy = arguments[0];
a = arguments[1];
break;
default:
x = nx = arguments[0];
y = ny = arguments[1];
switch ( number_arguments ) {
case 3:
a = arguments[2];
break;
case 4:
sx = sy = arguments[2];
a = arguments[3];
break;
case 5:
sx = arguments[2];
sy = arguments[3];
a = arguments[4];
break;
case 6:
sx = sy = arguments[2];
a = arguments[3];
nx = arguments[4];
ny = arguments[5];
break;
case 7:
sx = arguments[2];
sy = arguments[3];
a = arguments[4];
nx = arguments[5];
ny = arguments[6];
break;
default:
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : 'Too Many Arguments (7 or less)'",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
return((double *) NULL);
}
break;
}
/* Trap if sx or sy == 0 -- image is scaled out of existence! */
if ( fabs(sx) < MagickEpsilon || fabs(sy) < MagickEpsilon ) {
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : 'Zero Scale Given'",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
return((double *) NULL);
}
/* Save the given arguments as an affine distortion */
a=DegreesToRadians(a); cosine=cos(a); sine=sin(a);
*method = AffineDistortion;
coeff[0]=cosine/sx;
coeff[1]=sine/sx;
coeff[2]=x-nx*coeff[0]-ny*coeff[1];
coeff[3]=(-sine)/sy;
coeff[4]=cosine/sy;
coeff[5]=y-nx*coeff[3]-ny*coeff[4];
return(coeff);
}
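/* Worked example (editor's note): the single argument a = 90 (rotate 90 degrees about
the image center cx,cy with unit scale) gives cosine = 0, sine = 1, hence the reverse
mapping u = y + cx - cy, v = cx + cy - x; the center (cx,cy) maps to itself, as it
should. */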
case PerspectiveDistortion:
{ /*
Perspective Distortion (a ratio of affine distortions)
p(x,y) c0*x + c1*y + c2
u = ------ = ------------------
r(x,y) c6*x + c7*y + 1
q(x,y) c3*x + c4*y + c5
v = ------ = ------------------
r(x,y) c6*x + c7*y + 1
c8 = Sign of 'r', or the denominator affine, for the actual image.
This determines what part of the distorted image is 'ground'
side of the horizon, the other part is 'sky' or invalid.
Valid values are +1.0 or -1.0 only.
Input Arguments are sets of control points...
For Distort Images u,v, x,y ...
For Sparse Gradients x,y, r,g,b ...
Perspective Distortion Notes...
+ Can be thought of as ratio of 3 affine transformations
+ Not separable: r() or c6 and c7 are used by both equations
+ All 8 coefficients must be determined simultaneously
+ Will only work with a 2-dimensional Image Distortion
+ Can not be used for generating a sparse gradient (interpolation)
+ It is not linear, but is simple to generate an inverse
+ All lines within an image remain lines.
+ but distances between points may vary.
*/
double
**matrix,
*vectors[1],
terms[8];
size_t
cp_u = cp_values,
cp_v = cp_values+1;
MagickBooleanType
status;
if ( number_arguments%cp_size != 0 ||
number_arguments < cp_size*4 ) {
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument", "%s : 'require at least %.20g CPs'",
CommandOptionToMnemonic(MagickDistortOptions, *method), 4.0);
coeff=(double *) RelinquishMagickMemory(coeff);
return((double *) NULL);
}
/* fake 1x8 vectors matrix directly using the coefficients array */
vectors[0] = &(coeff[0]);
/* 8x8 least-squares matrix (zeroed) */
matrix = AcquireMagickMatrix(8UL,8UL);
if (matrix == (double **) NULL) {
coeff=(double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed",
"%s", "DistortCoefficients");
return((double *) NULL);
}
/* Add control points for least squares solving */
for (i=0; i < number_arguments; i+=4) {
terms[0]=arguments[i+cp_x]; /* c0*x */
terms[1]=arguments[i+cp_y]; /* c1*y */
terms[2]=1.0; /* c2*1 */
terms[3]=0.0;
terms[4]=0.0;
terms[5]=0.0;
terms[6]=-terms[0]*arguments[i+cp_u]; /* 1/(c6*x) */
terms[7]=-terms[1]*arguments[i+cp_u]; /* 1/(c7*y) */
LeastSquaresAddTerms(matrix,vectors,terms,&(arguments[i+cp_u]),
8UL,1UL);
terms[0]=0.0;
terms[1]=0.0;
terms[2]=0.0;
terms[3]=arguments[i+cp_x]; /* c3*x */
terms[4]=arguments[i+cp_y]; /* c4*y */
terms[5]=1.0; /* c5*1 */
terms[6]=-terms[3]*arguments[i+cp_v]; /* 1/(c6*x) */
terms[7]=-terms[4]*arguments[i+cp_v]; /* 1/(c7*y) */
LeastSquaresAddTerms(matrix,vectors,terms,&(arguments[i+cp_v]),
8UL,1UL);
}
/* Solve for LeastSquares Coefficients */
status=GaussJordanElimination(matrix,vectors,8UL,1UL);
matrix = RelinquishMagickMatrix(matrix, 8UL);
if ( status == MagickFalse ) {
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : 'Unsolvable Matrix'",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
return((double *) NULL);
}
/*
Calculate 9'th coefficient! The ground-sky determination.
What is sign of the 'ground' in r() denominator affine function?
Just use any valid image coordinate (first control point) in
destination for determination of what part of view is 'ground'.
*/
coeff[8] = coeff[6]*arguments[cp_x]
+ coeff[7]*arguments[cp_y] + 1.0;
coeff[8] = (coeff[8] < 0.0) ? -1.0 : +1.0;
return(coeff);
}
case PerspectiveProjectionDistortion:
{
/*
Arguments: Perspective Coefficients (forward mapping)
*/
if (number_arguments != 8) {
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument", "%s : 'Needs 8 coefficient values'",
CommandOptionToMnemonic(MagickDistortOptions, *method));
return((double *) NULL);
}
/* FUTURE: trap test c0*c4-c3*c1 == 0 (determinant = 0, no inverse) */
InvertPerspectiveCoefficients(arguments, coeff);
/*
Calculate the 9th coefficient: the ground-sky determination.
What is the sign of the 'ground' in the r() denominator affine function?
Just use any valid image coordinate in the destination for the determination.
For a forward mapped perspective the image's 0,0 coordinate will map to
c2,c5 in the distorted image, so set the sign of the denominator from that.
*/
coeff[8] = coeff[6]*arguments[2]
+ coeff[7]*arguments[5] + 1.0;
coeff[8] = (coeff[8] < 0.0) ? -1.0 : +1.0;
*method = PerspectiveDistortion;
return(coeff);
}
case BilinearForwardDistortion:
case BilinearReverseDistortion:
{
/* Bilinear Distortion (Forward mapping)
v = c0*x + c1*y + c2*x*y + c3;
for each 'value' given
This is actually a simple polynomial Distortion! The difference,
however, arises when we need to reverse the above equation to generate a
BilinearForwardDistortion (see below).
Input Arguments are sets of control points...
For Distort Images u,v, x,y ...
For Sparse Gradients x,y, r,g,b ...
*/
double
**matrix,
**vectors,
terms[4];
MagickBooleanType
status;
/* check the number of arguments */
if ( number_arguments%cp_size != 0 ||
number_arguments < cp_size*4 ) {
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument", "%s : 'require at least %.20g CPs'",
CommandOptionToMnemonic(MagickDistortOptions, *method), 4.0);
coeff=(double *) RelinquishMagickMemory(coeff);
return((double *) NULL);
}
/* create matrix, and a fake vectors matrix */
matrix = AcquireMagickMatrix(4UL,4UL);
vectors = (double **) AcquireQuantumMemory(number_values,sizeof(*vectors));
if (matrix == (double **) NULL || vectors == (double **) NULL)
{
matrix = RelinquishMagickMatrix(matrix, 4UL);
vectors = (double **) RelinquishMagickMemory(vectors);
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed",
"%s", "DistortCoefficients");
return((double *) NULL);
}
/* fake a number_values x4 vectors matrix from coefficients array */
for (i=0; i < number_values; i++)
vectors[i] = &(coeff[i*4]);
/* Add given control point pairs for least squares solving */
for (i=0; i < number_arguments; i+=cp_size) {
terms[0] = arguments[i+cp_x]; /* x */
terms[1] = arguments[i+cp_y]; /* y */
terms[2] = terms[0]*terms[1]; /* x*y */
terms[3] = 1; /* 1 */
LeastSquaresAddTerms(matrix,vectors,terms,
&(arguments[i+cp_values]),4UL,number_values);
}
/* Solve for LeastSquares Coefficients */
status=GaussJordanElimination(matrix,vectors,4UL,number_values);
matrix = RelinquishMagickMatrix(matrix, 4UL);
vectors = (double **) RelinquishMagickMemory(vectors);
if ( status == MagickFalse ) {
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : 'Unsolvable Matrix'",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
return((double *) NULL);
}
if ( *method == BilinearForwardDistortion ) {
/* Bilinear Forward Mapped Distortion
The above least-squares solved for the coefficients, but in the forward
direction, due to the swapped indexing constants.
i = c0*x + c1*y + c2*x*y + c3;
j = c4*x + c5*y + c6*x*y + c7;
where i,j are in the destination image, NOT the source.
Reverse Pixel mapping however needs to use reverse of these
functions. It required a full page of algebra to work out the
reversed mapping formula, but it resolves down to the following...
c8 = c0*c5-c1*c4;
c9 = 2*(c2*c5-c1*c6); // '2*a' in the quadratic formula
i = i - c3; j = j - c7;
b = c6*i - c2*j + c8; // So that a*y^2 + b*y + c == 0
c = c4*i - c0*j; // y = ( -b +- sqrt(bb - 4ac) ) / (2*a)
r = b*b - c9*(c+c);
if ( c9 != 0 )
y = ( -b + sqrt(r) ) / c9;
else
y = -c/b;
x = ( i - c1*y ) / ( c0 + c2*y );
NB: if 'r' is negative there is no solution!
NB: the sign of the sqrt() should be negative if image becomes
flipped or flopped, or crosses over itself.
NB: technically coefficient c5 is no longer needed,
but it is kept for completeness.
See Anthony Thyssen <A.Thyssen@griffith.edu.au>
or Fred Weinhaus <fmw@alink.net> for more details.
*/
coeff[8] = coeff[0]*coeff[5] - coeff[1]*coeff[4];
coeff[9] = 2*(coeff[2]*coeff[5] - coeff[1]*coeff[6]);
}
return(coeff);
}
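/* Illustrative sketch (disabled): the reverse mapping documented above,
solving the quadratic for y and then substituting to get x. The helper
name 'bilinear_unmap' is hypothetical, not part of the library API. */
#if 0
static void bilinear_unmap(const double c[10],double i,double j,
double *x,double *y)
{
double b,cc,rt;
i -= c[3]; j -= c[7];
b  = c[6]*i - c[2]*j + c[8];  /* a*y^2 + b*y + cc == 0 */
cc = c[4]*i - c[0]*j;         /* with c[9] == 2*a */
if ( fabs(c[9]) < MagickEpsilon )
*y = -cc/b;                   /* degenerate: linear in y */
else {
rt = b*b - 2*c[9]*cc;         /* if rt < 0 there is no solution */
*y = ( -b + sqrt(rt) )/c[9];
}
*x = ( i - c[1]*(*y) )/( c[0] + c[2]*(*y) );
}
#endif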
#if 0
case QuadrilateralDistortion:
{
/* Map a Quadrilateral to a unit square using BilinearReverse
Then map that unit square back to the final Quadrilateral
using BilinearForward.
Input Arguments are sets of control points...
For Distort Images u,v, x,y ...
For Sparse Gradients x,y, r,g,b ...
*/
/* UNDER CONSTRUCTION */
return(coeff);
}
#endif
case PolynomialDistortion:
{
/* Polynomial Distortion
The first two coefficients are used to hold global polynomial information
c0 = Order of the polynomial being created
c1 = number_of_terms in one polynomial equation
Rest of the coefficients map to the equations....
v = c0 + c1*x + c2*y + c3*x*y + c4*x^2 + c5*y^2 + c6*x^3 + ...
for each 'value' (number_values of them) given.
As such total coefficients = 2 + number_terms * number_values
Input Arguments are sets of control points...
For Distort Images order [u,v, x,y] ...
For Sparse Gradients order [x,y, r,g,b] ...
Polynomial Distortion Notes...
+ UNDER DEVELOPMENT -- Do not expect this to remain as is.
+ Currently polynomial is a reversed mapped distortion.
+ Order 1.5 is fudged to map into a bilinear distortion,
though it is not the same order as that distortion.
*/
double
**matrix,
**vectors,
*terms;
size_t
nterms; /* number of polynomial terms per number_values */
ssize_t
j;
MagickBooleanType
status;
/* first two coefficients hold polynomial order information */
coeff[0] = arguments[0];
coeff[1] = (double) poly_number_terms(arguments[0]);
nterms = (size_t) coeff[1];
/* create matrix, a fake vectors matrix, and least sqs terms */
matrix = AcquireMagickMatrix(nterms,nterms);
vectors = (double **) AcquireQuantumMemory(number_values,sizeof(*vectors));
terms = (double *) AcquireQuantumMemory(nterms, sizeof(*terms));
if (matrix == (double **) NULL ||
vectors == (double **) NULL ||
terms == (double *) NULL )
{
matrix = RelinquishMagickMatrix(matrix, nterms);
vectors = (double **) RelinquishMagickMemory(vectors);
terms = (double *) RelinquishMagickMemory(terms);
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed",
"%s", "DistortCoefficients");
return((double *) NULL);
}
/* fake a number_values x nterms vectors matrix from the coefficients array */
for (i=0; i < number_values; i++)
vectors[i] = &(coeff[2+i*nterms]);
/* Add given control point pairs for least squares solving */
for (i=1; i < number_arguments; i+=cp_size) { /* NB: start = 1 not 0 */
for (j=0; j < (ssize_t) nterms; j++)
terms[j] = poly_basis_fn(j,arguments[i+cp_x],arguments[i+cp_y]);
LeastSquaresAddTerms(matrix,vectors,terms,
&(arguments[i+cp_values]),nterms,number_values);
}
terms = (double *) RelinquishMagickMemory(terms);
/* Solve for LeastSquares Coefficients */
status=GaussJordanElimination(matrix,vectors,nterms,number_values);
matrix = RelinquishMagickMatrix(matrix, nterms);
vectors = (double **) RelinquishMagickMemory(vectors);
if ( status == MagickFalse ) {
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : 'Unsolvable Matrix'",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
return((double *) NULL);
}
return(coeff);
}
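/* Illustrative usage (command line, control points abbreviated):
convert src.png -distort Polynomial '1.5  u1,v1 x1,y1  ...' dst.png
where the leading value is the polynomial order and the rest are the
control point sets in the order documented above. */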
case ArcDistortion:
{
/* Arc Distortion
Args: arc_width rotate top_edge_radius bottom_edge_radius
All but first argument are optional
arc_width The angle over which to arc the image side-to-side
rotate Angle to rotate image from vertical center
top_radius Set top edge of source image at this radius
bottom_radius Set bottom edge to this radius (radial scaling)
By default, if the radii arguments are not provided, the image radius
is calculated so the horizontal center-line fits the given arc
without scaling.
The output image size is ALWAYS adjusted to contain the whole image,
and an offset is given to position image relative to the 0,0 point of
the origin, allowing users to use relative positioning onto larger
background (via -flatten).
The arguments are converted to these coefficients
c0: angle for center of source image
c1: angle scale for mapping to source image
c2: radius for top of source image
c3: radius scale for mapping source image
c4: centerline of arc within source image
Note the coefficients use a center angle, so the asymptotic join is
furthest from both sides of the source image. This also means that
for arc angles greater than 360 the sides of the image will be
trimmed equally.
Arc Distortion Notes...
+ Does not use a set of CPs
+ Will only work with Image Distortion
+ Can not be used for generating a sparse gradient (interpolation)
*/
if ( number_arguments >= 1 && arguments[0] < MagickEpsilon ) {
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : 'Arc Angle Too Small'",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
return((double *) NULL);
}
if ( number_arguments >= 3 && arguments[2] < MagickEpsilon ) {
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : 'Outer Radius Too Small'",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
return((double *) NULL);
}
coeff[0] = -MagickPI2; /* -90, place at top! */
if ( number_arguments >= 1 )
coeff[1] = DegreesToRadians(arguments[0]);
else
coeff[1] = MagickPI2; /* zero arguments - center is at top */
if ( number_arguments >= 2 )
coeff[0] += DegreesToRadians(arguments[1]);
coeff[0] /= Magick2PI; /* normalize radians */
coeff[0] -= MagickRound(coeff[0]);
coeff[0] *= Magick2PI; /* de-normalize back to radians */
coeff[3] = (double)image->rows-1;
coeff[2] = (double)image->columns/coeff[1] + coeff[3]/2.0;
if ( number_arguments >= 3 ) {
if ( number_arguments >= 4 )
coeff[3] = arguments[2] - arguments[3];
else
coeff[3] *= arguments[2]/coeff[2];
coeff[2] = arguments[2];
}
coeff[4] = ((double)image->columns-1.0)/2.0;
return(coeff);
}
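/* Illustrative usage (command line): arc the image over a 60 degree
angle, letting the radii default as described above:
convert src.png -distort Arc 60 dst.png */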
case PolarDistortion:
case DePolarDistortion:
{
/* (De)Polar Distortion (same set of arguments)
Args: Rmax, Rmin, Xcenter,Ycenter, Afrom,Ato
DePolar can also have the extra arguments of Width, Height
Coefficients 0 to 5 are the sanitized versions of the first 6 input args
Coefficient 6 is the angle-to-coordinate ratio and vice versa
Coefficient 7 is the radius-to-coordinate ratio and vice versa
WARNING: It is possible for Radius max<min and/or Angle from>to
*/
if ( number_arguments == 3
|| ( number_arguments > 6 && *method == PolarDistortion )
|| number_arguments > 8 ) {
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"InvalidArgument", "%s : number of arguments",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
coeff=(double *) RelinquishMagickMemory(coeff);
return((double *) NULL);
}
/* Rmax - if 0 calculate appropriate value */
if ( number_arguments >= 1 )
coeff[0] = arguments[0];
else
coeff[0] = 0.0;
/* Rmin - usually 0 */
coeff[1] = number_arguments >= 2 ? arguments[1] : 0.0;
/* Center X,Y */
if ( number_arguments >= 4 ) {
coeff[2] = arguments[2];
coeff[3] = arguments[3];
}
else { /* center of actual image */
coeff[2] = (double)(image->columns)/2.0+image->page.x;
coeff[3] = (double)(image->rows)/2.0+image->page.y;
}
/* Angle from,to - about polar center 0 is downward */
coeff[4] = -MagickPI;
if ( number_arguments >= 5 )
coeff[4] = DegreesToRadians(arguments[4]);
coeff[5] = coeff[4];
if ( number_arguments >= 6 )
coeff[5] = DegreesToRadians(arguments[5]);
if ( fabs(coeff[4]-coeff[5]) < MagickEpsilon )
coeff[5] += Magick2PI; /* same angle is a full circle */
/* if radius 0 or negative, it's a special value... */
if ( coeff[0] < MagickEpsilon ) {
/* Use closest edge if radius == 0 */
if ( fabs(coeff[0]) < MagickEpsilon ) {
coeff[0]=MagickMin(fabs(coeff[2]-image->page.x),
fabs(coeff[3]-image->page.y));
coeff[0]=MagickMin(coeff[0],
fabs(coeff[2]-image->page.x-image->columns));
coeff[0]=MagickMin(coeff[0],
fabs(coeff[3]-image->page.y-image->rows));
}
/* furthest diagonal if radius == -1 */
if ( fabs(-1.0-coeff[0]) < MagickEpsilon ) {
double rx,ry;
rx = coeff[2]-image->page.x;
ry = coeff[3]-image->page.y;
coeff[0] = rx*rx+ry*ry;
ry = coeff[3]-image->page.y-image->rows;
coeff[0] = MagickMax(coeff[0],rx*rx+ry*ry);
rx = coeff[2]-image->page.x-image->columns;
coeff[0] = MagickMax(coeff[0],rx*rx+ry*ry);
ry = coeff[3]-image->page.y;
coeff[0] = MagickMax(coeff[0],rx*rx+ry*ry);
coeff[0] = sqrt(coeff[0]);
}
}
/* IF Rmax <= 0 or Rmin < 0 OR Rmax < Rmin, THEN error */
if ( coeff[0] < MagickEpsilon || coeff[1] < -MagickEpsilon
|| (coeff[0]-coeff[1]) < MagickEpsilon ) {
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument", "%s : Invalid Radius",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
coeff=(double *) RelinquishMagickMemory(coeff);
return((double *) NULL);
}
/* conversion ratios */
if ( *method == PolarDistortion ) {
coeff[6]=(double) image->columns/(coeff[5]-coeff[4]);
coeff[7]=(double) image->rows/(coeff[0]-coeff[1]);
}
else { /* *method == DePolarDistortion */
coeff[6]=(coeff[5]-coeff[4])/image->columns;
coeff[7]=(coeff[0]-coeff[1])/image->rows;
}
return(coeff);
}
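/* Illustrative usage (command line): a DePolar-Polar round trip using
the auto-calculated radius (Rmax = 0):
convert src.png -distort DePolar 0 -distort Polar 0 dst.png */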
case Cylinder2PlaneDistortion:
case Plane2CylinderDistortion:
{
/* 3D Cylinder to/from a Tangential Plane
Projection between a cylinder and a flat plane from a point on the
center line of the cylinder.
The two surfaces coincide in 3D space at the given centers of
distortion (perpendicular to projection point) on both images.
Args: FOV_arc_width
Coefficients: FOV(radians), Radius, center_x,y, dest_center_x,y
FOV (Field Of View) the angular field of view of the distortion,
across the width of the image, in degrees. The centers are the
points of least distortion in the input and resulting images.
These centers are however determined later.
Coeff 0 is the FOV angle of view of image width in radians
Coeff 1 is calculated radius of cylinder.
Coeff 2,3 center of distortion of input image
Coefficients 4,5 Center of Distortion of dest (determined later)
*/
if ( arguments[0] < MagickEpsilon || arguments[0] > 160.0 ) {
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument", "%s : Invalid FOV Angle",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
coeff=(double *) RelinquishMagickMemory(coeff);
return((double *) NULL);
}
coeff[0] = DegreesToRadians(arguments[0]);
if ( *method == Cylinder2PlaneDistortion )
/* image is curved around cylinder, so FOV angle (in radians)
* scales directly to image X coordinate, according to its radius.
*/
coeff[1] = (double) image->columns/coeff[0];
else
/* radius is distance away from an image with this angular FOV */
coeff[1] = (double) image->columns / ( 2 * tan(coeff[0]/2) );
coeff[2] = (double)(image->columns)/2.0+image->page.x;
coeff[3] = (double)(image->rows)/2.0+image->page.y;
coeff[4] = coeff[2];
coeff[5] = coeff[3]; /* assuming image size is the same */
return(coeff);
}
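/* Illustrative usage (command line): flatten a cylindrical panorama
with a 90 degree field of view across the image width:
convert pano.png -distort Cylinder2Plane 90 flat.png */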
case BarrelDistortion:
case BarrelInverseDistortion:
{
/* Barrel Distortion
Rs=(A*Rd^3 + B*Rd^2 + C*Rd + D)*Rd
BarrelInv Distortion
Rs=Rd/(A*Rd^3 + B*Rd^2 + C*Rd + D)
Where Rd is the normalized radius from corner to middle of image
Input Arguments are one of the following forms (number of arguments)...
3: A,B,C
4: A,B,C,D
5: A,B,C X,Y
6: A,B,C,D X,Y
8: Ax,Bx,Cx,Dx Ay,By,Cy,Dy
10: Ax,Bx,Cx,Dx Ay,By,Cy,Dy X,Y
Returns 10 coefficient values, which are de-normalized (pixel scale)
Ax, Bx, Cx, Dx, Ay, By, Cy, Dy, Xc, Yc
*/
/* Radius de-normalization scaling factor */
double
rscale = 2.0/MagickMin((double) image->columns,(double) image->rows);
/* sanity check number of args must = 3,4,5,6,8,10 or error */
if ( (number_arguments < 3) || (number_arguments == 7) ||
(number_arguments == 9) || (number_arguments > 10) )
{
coeff=(double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"InvalidArgument", "%s : number of arguments",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
return((double *) NULL);
}
/* A,B,C,D coefficients */
coeff[0] = arguments[0];
coeff[1] = arguments[1];
coeff[2] = arguments[2];
if ((number_arguments == 3) || (number_arguments == 5) )
coeff[3] = 1.0 - coeff[0] - coeff[1] - coeff[2];
else
coeff[3] = arguments[3];
/* de-normalize the coefficients */
coeff[0] *= pow(rscale,3.0);
coeff[1] *= rscale*rscale;
coeff[2] *= rscale;
/* Y coefficients: as given OR same as X coefficients */
if ( number_arguments >= 8 ) {
coeff[4] = arguments[4] * pow(rscale,3.0);
coeff[5] = arguments[5] * rscale*rscale;
coeff[6] = arguments[6] * rscale;
coeff[7] = arguments[7];
}
else {
coeff[4] = coeff[0];
coeff[5] = coeff[1];
coeff[6] = coeff[2];
coeff[7] = coeff[3];
}
/* X,Y Center of Distortion (image coordinates) */
if ( number_arguments == 5 ) {
coeff[8] = arguments[3];
coeff[9] = arguments[4];
}
else if ( number_arguments == 6 ) {
coeff[8] = arguments[4];
coeff[9] = arguments[5];
}
else if ( number_arguments == 10 ) {
coeff[8] = arguments[8];
coeff[9] = arguments[9];
}
else {
/* center of the image provided (image coordinates) */
coeff[8] = (double)image->columns/2.0 + image->page.x;
coeff[9] = (double)image->rows/2.0 + image->page.y;
}
return(coeff);
}
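/* Illustrative usage (command line): a mild barrel correction using
only A,B,C; D then defaults to 1-A-B-C so the normalized radius 1.0
maps to itself:
convert src.png -distort Barrel '0.0 0.0 -0.075' dst.png */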
case ShepardsDistortion:
{
/* Shepard's Distortion input arguments are the coefficients!
Just check the number of arguments is valid!
Args: u1,v1, x1,y1, ...
OR : u1,v1, r1,g1,c1, ...
*/
if ( number_arguments%cp_size != 0 ||
number_arguments < cp_size ) {
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument", "%s : 'requires CP's (4 numbers each)'",
CommandOptionToMnemonic(MagickDistortOptions, *method));
coeff=(double *) RelinquishMagickMemory(coeff);
return((double *) NULL);
}
/* User defined weighting power for Shepard's Method */
{ const char *artifact=GetImageArtifact(image,"shepards:power");
if ( artifact != (const char *) NULL ) {
coeff[0]=StringToDouble(artifact,(char **) NULL) / 2.0;
if ( coeff[0] < MagickEpsilon ) {
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"InvalidArgument","%s", "-define shepards:power" );
coeff=(double *) RelinquishMagickMemory(coeff);
return((double *) NULL);
}
}
else
coeff[0]=1.0; /* Default power of 2 (Inverse Squared) */
}
return(coeff);
}
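/* Illustrative usage (command line): nudge the feature at 30,20 to
20,20, with the "-define shepards:power=..." setting above controlling
how quickly each control point's influence falls off:
convert src.png -distort Shepards '30,20 20,20' dst.png */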
default:
break;
}
/* you should never reach this point */
perror("no method handler"); /* just fail assertion */
return((double *) NULL);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D i s t o r t R e s i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DistortResizeImage() resizes an image using the equivalent but slower image
% distortion operator. The filter is applied using EWA cylindrical
% resampling. But like resize, the final image size is limited to whole pixels
% with no virtual-pixel effects on the result.
%
% Note that images containing a transparency channel will be twice as slow to
% resize as images without one.
%
% The format of the DistortResizeImage method is:
%
% Image *DistortResizeImage(const Image *image,const size_t columns,
% const size_t rows,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o columns: the number of columns in the resized image.
%
% o rows: the number of rows in the resized image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *DistortResizeImage(const Image *image,
const size_t columns,const size_t rows,ExceptionInfo *exception)
{
#define DistortResizeImageTag "Distort/Image"
Image
*resize_image,
*tmp_image;
RectangleInfo
crop_area;
double
distort_args[12];
VirtualPixelMethod
vp_save;
/*
Distort resize image.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
if ((columns == 0) || (rows == 0))
return((Image *) NULL);
/* Do not short-circuit this resize if final image size is unchanged */
(void) memset(distort_args,0,12*sizeof(double));
distort_args[4]=(double) image->columns;
distort_args[6]=(double) columns;
distort_args[9]=(double) image->rows;
distort_args[11]=(double) rows;
vp_save=GetImageVirtualPixelMethod(image);
tmp_image=CloneImage(image,0,0,MagickTrue,exception);
if ( tmp_image == (Image *) NULL )
return((Image *) NULL);
(void) SetImageVirtualPixelMethod(tmp_image,TransparentVirtualPixelMethod);
if (image->matte == MagickFalse)
{
/*
Image has no transparency channel, so we are free to use it
*/
(void) SetImageAlphaChannel(tmp_image,SetAlphaChannel);
resize_image=DistortImage(tmp_image,AffineDistortion,12,distort_args,
MagickTrue,exception);
tmp_image=DestroyImage(tmp_image);
if ( resize_image == (Image *) NULL )
return((Image *) NULL);
(void) SetImageAlphaChannel(resize_image,DeactivateAlphaChannel);
InheritException(exception,&image->exception);
}
else
{
/*
Image has transparency, so handle colors and alpha separately.
Basically we need to separate the Virtual-Pixel alpha in the resized
image, so only the actual original image's alpha channel is used.
*/
Image
*resize_alpha;
/* distort alpha channel separately */
(void) SeparateImageChannel(tmp_image,TrueAlphaChannel);
(void) SetImageAlphaChannel(tmp_image,OpaqueAlphaChannel);
resize_alpha=DistortImage(tmp_image,AffineDistortion,12,distort_args,
MagickTrue,exception);
tmp_image=DestroyImage(tmp_image);
if ( resize_alpha == (Image *) NULL )
return((Image *) NULL);
/* distort the actual image containing alpha + VP alpha */
tmp_image=CloneImage(image,0,0,MagickTrue,exception);
if ( tmp_image == (Image *) NULL )
return((Image *) NULL);
(void) SetImageVirtualPixelMethod(tmp_image,
TransparentVirtualPixelMethod);
resize_image=DistortImage(tmp_image,AffineDistortion,12,distort_args,
MagickTrue,exception);
tmp_image=DestroyImage(tmp_image);
if ( resize_image == (Image *) NULL)
{
resize_alpha=DestroyImage(resize_alpha);
return((Image *) NULL);
}
/* replace the resized image's alpha with the separately distorted alpha */
(void) SetImageAlphaChannel(resize_image,DeactivateAlphaChannel);
(void) SetImageAlphaChannel(resize_alpha,DeactivateAlphaChannel);
(void) CompositeImage(resize_image,CopyOpacityCompositeOp,resize_alpha,
0,0);
InheritException(exception,&resize_image->exception);
resize_image->matte=image->matte;
resize_image->compose=image->compose;
resize_alpha=DestroyImage(resize_alpha);
}
(void) SetImageVirtualPixelMethod(resize_image,vp_save);
/*
Clean up the results of the Distortion
*/
crop_area.width=columns;
crop_area.height=rows;
crop_area.x=0;
crop_area.y=0;
tmp_image=resize_image;
resize_image=CropImage(tmp_image,&crop_area,exception);
tmp_image=DestroyImage(tmp_image);
if (resize_image != (Image *) NULL)
{
resize_image->page.width=0;
resize_image->page.height=0;
}
return(resize_image);
}
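/* Illustrative usage sketch (disabled): resizing via the distortion
operator defined above; assumes a valid 'image' and an initialized
'exception'. */
#if 0
{
Image *resized = DistortResizeImage(image,640,480,exception);
if (resized != (Image *) NULL)
{
/* ... use the resized image ... */
resized=DestroyImage(resized);
}
}
#endif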
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D i s t o r t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DistortImage() distorts an image using various distortion methods, by
% mapping color lookups of the source image to a new destination image
% usually of the same size as the source image, unless 'bestfit' is set to
% true.
%
% If 'bestfit' is enabled, and distortion allows it, the destination image is
% adjusted to ensure the whole source 'image' will just fit within the final
% destination image, which will be sized and offset accordingly. Also in
% many cases the virtual offset of the source image will be taken into
% account in the mapping.
%
% If the '-verbose' control option has been set, print to standard error the
% equivalent '-fx' formula with coefficients for the function, if practical.
%
% The format of the DistortImage() method is:
%
% Image *DistortImage(const Image *image,const DistortImageMethod method,
% const size_t number_arguments,const double *arguments,
% MagickBooleanType bestfit, ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image to be distorted.
%
% o method: the method of image distortion.
%
% ArcDistortion always ignores source image offset, and always
% 'bestfit' the destination image with the top left corner offset
% relative to the polar mapping center.
%
% Affine, Perspective, and Bilinear, do least squares fitting of the
%      distortion when more than the minimum number of control point pairs
% are provided.
%
%      Perspective, and Bilinear, fall back to an Affine distortion when
%      fewer than 4 control point pairs are provided. While Affine
%      distortions let you use any number of control point pairs: zero pairs
%      is a no-op (viewport only) distortion, one pair is a translation, and
%      two pairs of control points do a scale-rotate-translate, without any
%      shearing.
%
% o number_arguments: the number of arguments given.
%
% o arguments: an array of floating point arguments for this method.
%
% o bestfit: Attempt to 'bestfit' the size of the resulting image.
% This also forces the resulting image to be a 'layered' virtual
% canvas image. Can be overridden using 'distort:viewport' setting.
%
% o exception: return any errors or warnings in this structure
%
% Extra Controls from Image meta-data (artifacts)...
%
% o "verbose"
%        Output to stderr alternatives, internal coefficients, and FX
% equivalents for the distortion operation (if feasible).
% This forms an extra check of the distortion method, and allows users
% access to the internal constants IM calculates for the distortion.
%
% o "distort:viewport"
%        Directly set the output image canvas area and offset to use for the
%        resulting image, rather than use the original image's canvas, or a
% calculated 'bestfit' canvas.
%
% o "distort:scale"
% Scale the size of the output canvas by this amount to provide a
% method of Zooming, and for super-sampling the results.
%
%  Other settings that can affect results include
%
% o 'interpolate' For source image lookups (scale enlargements)
%
% o 'filter' Set filter to use for area-resampling (scale shrinking).
% Set to 'point' to turn off and use 'interpolate' lookup
% instead
%
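%  For example (illustrative, control points abbreviated), distorting
%  into a fixed viewport:
%
%    convert src.png -define distort:viewport=640x480+0+0 \
%      -distort Perspective 'u1,v1 x1,y1  u2,v2 x2,y2  ...' dst.png
%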
*/
MagickExport Image *DistortImage(const Image *image,DistortImageMethod method,
const size_t number_arguments,const double *arguments,
MagickBooleanType bestfit,ExceptionInfo *exception)
{
#define DistortImageTag "Distort/Image"
double
*coeff,
output_scaling;
Image
*distort_image;
RectangleInfo
geometry; /* geometry of the distorted space viewport */
MagickBooleanType
viewport_given;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
/*
Handle Special Compound Distortions
*/
if (method == ResizeDistortion)
{
if (number_arguments != 2)
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : '%s'","Resize",
"Invalid number of args: 2 only");
return((Image *) NULL);
}
distort_image=DistortResizeImage(image,(size_t) arguments[0],
(size_t) arguments[1],exception);
return(distort_image);
}
/*
Convert input arguments (usually as control points for reverse mapping)
into mapping coefficients to apply the distortion.
Note that some distortions are mapped to other distortions,
and as such do not require specific code after this point.
*/
coeff=GenerateCoefficients(image,&method,number_arguments,arguments,0,
exception);
if (coeff == (double *) NULL)
return((Image *) NULL);
/*
Determine the size and offset for a 'bestfit' destination.
Usually the four corners of the source image are enough.
*/
/* default output image bounds, when no 'bestfit' is requested */
geometry.width=image->columns;
geometry.height=image->rows;
geometry.x=0;
geometry.y=0;
if ( method == ArcDistortion ) {
bestfit = MagickTrue; /* always calculate a 'best fit' viewport */
}
/* Work out the 'best fit', (required for ArcDistortion) */
if ( bestfit ) {
PointInfo
s,d,min,max; /* source, dest coords --mapping--> min, max coords */
MagickBooleanType
fix_bounds = MagickTrue; /* enlarge bounds for VP handling */
s.x=s.y=min.x=max.x=min.y=max.y=0.0; /* keep compiler happy */
/* defines to figure out the bounds of the distorted image */
#define InitalBounds(p) \
{ \
/* printf("%lg,%lg -> %lg,%lg\n", s.x,s.y, d.x,d.y); */ \
min.x = max.x = p.x; \
min.y = max.y = p.y; \
}
#define ExpandBounds(p) \
{ \
/* printf("%lg,%lg -> %lg,%lg\n", s.x,s.y, d.x,d.y); */ \
min.x = MagickMin(min.x,p.x); \
max.x = MagickMax(max.x,p.x); \
min.y = MagickMin(min.y,p.y); \
max.y = MagickMax(max.y,p.y); \
}
switch (method)
{
case AffineDistortion:
{ double inverse[6];
InvertAffineCoefficients(coeff, inverse);
s.x = (double) image->page.x;
s.y = (double) image->page.y;
d.x = inverse[0]*s.x+inverse[1]*s.y+inverse[2];
d.y = inverse[3]*s.x+inverse[4]*s.y+inverse[5];
InitalBounds(d);
s.x = (double) image->page.x+image->columns;
s.y = (double) image->page.y;
d.x = inverse[0]*s.x+inverse[1]*s.y+inverse[2];
d.y = inverse[3]*s.x+inverse[4]*s.y+inverse[5];
ExpandBounds(d);
s.x = (double) image->page.x;
s.y = (double) image->page.y+image->rows;
d.x = inverse[0]*s.x+inverse[1]*s.y+inverse[2];
d.y = inverse[3]*s.x+inverse[4]*s.y+inverse[5];
ExpandBounds(d);
s.x = (double) image->page.x+image->columns;
s.y = (double) image->page.y+image->rows;
d.x = inverse[0]*s.x+inverse[1]*s.y+inverse[2];
d.y = inverse[3]*s.x+inverse[4]*s.y+inverse[5];
ExpandBounds(d);
break;
}
case PerspectiveDistortion:
{ double inverse[8], scale;
InvertPerspectiveCoefficients(coeff, inverse);
s.x = (double) image->page.x;
s.y = (double) image->page.y;
scale=inverse[6]*s.x+inverse[7]*s.y+1.0;
scale=PerceptibleReciprocal(scale);
d.x = scale*(inverse[0]*s.x+inverse[1]*s.y+inverse[2]);
d.y = scale*(inverse[3]*s.x+inverse[4]*s.y+inverse[5]);
InitalBounds(d);
s.x = (double) image->page.x+image->columns;
s.y = (double) image->page.y;
scale=inverse[6]*s.x+inverse[7]*s.y+1.0;
scale=PerceptibleReciprocal(scale);
d.x = scale*(inverse[0]*s.x+inverse[1]*s.y+inverse[2]);
d.y = scale*(inverse[3]*s.x+inverse[4]*s.y+inverse[5]);
ExpandBounds(d);
s.x = (double) image->page.x;
s.y = (double) image->page.y+image->rows;
scale=inverse[6]*s.x+inverse[7]*s.y+1.0;
scale=PerceptibleReciprocal(scale);
d.x = scale*(inverse[0]*s.x+inverse[1]*s.y+inverse[2]);
d.y = scale*(inverse[3]*s.x+inverse[4]*s.y+inverse[5]);
ExpandBounds(d);
s.x = (double) image->page.x+image->columns;
s.y = (double) image->page.y+image->rows;
scale=inverse[6]*s.x+inverse[7]*s.y+1.0;
scale=PerceptibleReciprocal(scale);
d.x = scale*(inverse[0]*s.x+inverse[1]*s.y+inverse[2]);
d.y = scale*(inverse[3]*s.x+inverse[4]*s.y+inverse[5]);
ExpandBounds(d);
break;
}
case ArcDistortion:
{ double a, ca, sa;
/* Forward Map Corners */
a = coeff[0]-coeff[1]/2; ca = cos(a); sa = sin(a);
d.x = coeff[2]*ca;
d.y = coeff[2]*sa;
InitalBounds(d);
d.x = (coeff[2]-coeff[3])*ca;
d.y = (coeff[2]-coeff[3])*sa;
ExpandBounds(d);
a = coeff[0]+coeff[1]/2; ca = cos(a); sa = sin(a);
d.x = coeff[2]*ca;
d.y = coeff[2]*sa;
ExpandBounds(d);
d.x = (coeff[2]-coeff[3])*ca;
d.y = (coeff[2]-coeff[3])*sa;
ExpandBounds(d);
/* Orthogonal points along top of arc */
for( a=(double) (ceil((double) ((coeff[0]-coeff[1]/2.0)/MagickPI2))*MagickPI2);
a<(coeff[0]+coeff[1]/2.0); a+=MagickPI2 ) {
ca = cos(a); sa = sin(a);
d.x = coeff[2]*ca;
d.y = coeff[2]*sa;
ExpandBounds(d);
}
/*
Convert the angle_to_width and radius_to_height
to appropriate scaling factors, to allow faster processing
in the mapping function.
*/
coeff[1] = (double) (Magick2PI*image->columns/coeff[1]);
coeff[3] = (double)image->rows/coeff[3];
break;
}
case PolarDistortion:
{
if (number_arguments < 2)
coeff[2] = coeff[3] = 0.0;
min.x = coeff[2]-coeff[0];
max.x = coeff[2]+coeff[0];
min.y = coeff[3]-coeff[0];
max.y = coeff[3]+coeff[0];
/* should be about 1.0 if Rmin = 0 */
coeff[7]=(double) geometry.height/(coeff[0]-coeff[1]);
break;
}
case DePolarDistortion:
{
/* direct calculation as it needs to tile correctly
* for reversibility in a DePolar-Polar cycle */
fix_bounds = MagickFalse;
geometry.x = geometry.y = 0;
geometry.height = (size_t) ceil(coeff[0]-coeff[1]);
geometry.width = (size_t) ceil((coeff[0]-coeff[1])*
(coeff[5]-coeff[4])*0.5);
/* correct scaling factors relative to new size */
coeff[6]=(coeff[5]-coeff[4])*PerceptibleReciprocal(geometry.width); /* changed width */
coeff[7]=(coeff[0]-coeff[1])*PerceptibleReciprocal(geometry.height); /* should be about 1.0 */
break;
}
case Cylinder2PlaneDistortion:
{
/* direct calculation so center of distortion is either a pixel
* center, or pixel edge. This allows for reversibility of the
* distortion */
geometry.x = geometry.y = 0;
geometry.width = (size_t) ceil( 2.0*coeff[1]*tan(coeff[0]/2.0) );
geometry.height = (size_t) ceil( 2.0*coeff[3]/cos(coeff[0]/2.0) );
/* correct center of distortion relative to new size */
coeff[4] = (double) geometry.width/2.0;
coeff[5] = (double) geometry.height/2.0;
fix_bounds = MagickFalse;
break;
}
case Plane2CylinderDistortion:
{
/* direct calculation center is either pixel center, or pixel edge
* so as to allow reversibility of the image distortion */
geometry.x = geometry.y = 0;
geometry.width = (size_t) ceil(coeff[0]*coeff[1]); /* FOV * radius */
geometry.height = (size_t) (2*coeff[3]); /* input image height */
/* correct center of distortion relative to new size */
coeff[4] = (double) geometry.width/2.0;
coeff[5] = (double) geometry.height/2.0;
fix_bounds = MagickFalse;
break;
}
case ShepardsDistortion:
case BilinearForwardDistortion:
case BilinearReverseDistortion:
#if 0
case QuadrilateralDistortion:
#endif
case PolynomialDistortion:
case BarrelDistortion:
case BarrelInverseDistortion:
default:
/* no calculated bestfit available for these distortions */
bestfit = MagickFalse;
fix_bounds = MagickFalse;
break;
}
/* Set the output image geometry to calculated 'bestfit'.
Yes this tends to 'over do' the final image size, ON PURPOSE!
Do not do this for DePolar which needs to be exact for virtual tiling.
*/
if ( fix_bounds ) {
geometry.x = (ssize_t) floor(min.x-0.5);
geometry.y = (ssize_t) floor(min.y-0.5);
geometry.width=(size_t) ceil(max.x-geometry.x+0.5);
geometry.height=(size_t) ceil(max.y-geometry.y+0.5);
}
} /* end bestfit destination image calculations */
/* A user-provided 'viewport' expert option may override
some parts of the current output image geometry.
This also overrides its default 'bestfit' setting.
*/
{ const char *artifact=GetImageArtifact(image,"distort:viewport");
viewport_given = MagickFalse;
if ( artifact != (const char *) NULL ) {
MagickStatusType flags=ParseAbsoluteGeometry(artifact,&geometry);
if (flags==NoValue)
(void) ThrowMagickException(exception,GetMagickModule(),
OptionWarning,"InvalidGeometry","`%s' `%s'",
"distort:viewport",artifact);
else
viewport_given = MagickTrue;
}
}
/* Verbose output */
if ( GetImageArtifact(image,"verbose") != (const char *) NULL ) {
ssize_t
i;
char image_gen[MaxTextExtent];
const char *lookup;
/* Set destination image size and virtual offset */
if ( bestfit || viewport_given ) {
(void) FormatLocaleString(image_gen, MaxTextExtent," -size %.20gx%.20g "
"-page %+.20g%+.20g xc: +insert \\\n",(double) geometry.width,
(double) geometry.height,(double) geometry.x,(double) geometry.y);
lookup="v.p{ xx-v.page.x-.5, yy-v.page.y-.5 }";
}
else {
image_gen[0] = '\0'; /* no destination to generate */
lookup = "p{ xx-page.x-.5, yy-page.y-.5 }"; /* simplify lookup */
}
switch (method)
{
case AffineDistortion:
{
double
*inverse;
inverse=(double *) AcquireQuantumMemory(6,sizeof(*inverse));
if (inverse == (double *) NULL)
{
coeff=(double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","%s","DistortImages");
return((Image *) NULL);
}
InvertAffineCoefficients(coeff, inverse);
CoefficientsToAffineArgs(inverse);
(void) FormatLocaleFile(stderr, "Affine Projection:\n");
(void) FormatLocaleFile(stderr,
" -distort AffineProjection \\\n '");
for (i=0; i < 5; i++)
(void) FormatLocaleFile(stderr, "%lf,", inverse[i]);
(void) FormatLocaleFile(stderr, "%lf'\n", inverse[5]);
inverse=(double *) RelinquishMagickMemory(inverse);
(void) FormatLocaleFile(stderr, "Affine Distort, FX Equivelent:\n");
(void) FormatLocaleFile(stderr, "%s", image_gen);
(void) FormatLocaleFile(stderr,
" -fx 'ii=i+page.x+0.5; jj=j+page.y+0.5;\n");
(void) FormatLocaleFile(stderr," xx=%+lf*ii %+lf*jj %+lf;\n",
coeff[0],coeff[1],coeff[2]);
(void) FormatLocaleFile(stderr," yy=%+lf*ii %+lf*jj %+lf;\n",
coeff[3],coeff[4],coeff[5]);
(void) FormatLocaleFile(stderr," %s' \\\n",lookup);
break;
}
case PerspectiveDistortion:
{
double
*inverse;
inverse=(double *) AcquireQuantumMemory(8,sizeof(*inverse));
if (inverse == (double *) NULL)
{
coeff=(double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","%s",
"DistortCoefficients");
return((Image *) NULL);
}
InvertPerspectiveCoefficients(coeff, inverse);
(void) FormatLocaleFile(stderr,"Perspective Projection:\n");
(void) FormatLocaleFile(stderr,
" -distort PerspectiveProjection \\\n '");
for (i=0; i < 4; i++)
(void) FormatLocaleFile(stderr, "%.*g, ",GetMagickPrecision(),
inverse[i]);
(void) FormatLocaleFile(stderr, "\n ");
for ( ; i < 7; i++)
(void) FormatLocaleFile(stderr, "%.*g, ",GetMagickPrecision(),
inverse[i]);
(void) FormatLocaleFile(stderr, "%.*g'\n",GetMagickPrecision(),
inverse[7]);
inverse=(double *) RelinquishMagickMemory(inverse);
(void) FormatLocaleFile(stderr,"Perspective Distort, FX Equivelent:\n");
(void) FormatLocaleFile(stderr,"%.1024s",image_gen);
(void) FormatLocaleFile(stderr,
" -fx 'ii=i+page.x+0.5; jj=j+page.y+0.5;\n");
(void) FormatLocaleFile(stderr," rr=%+.*g*ii %+.*g*jj + 1;\n",
GetMagickPrecision(),coeff[6],GetMagickPrecision(),coeff[7]);
(void) FormatLocaleFile(stderr,
" xx=(%+.*g*ii %+.*g*jj %+.*g)/rr;\n",
GetMagickPrecision(),coeff[0],GetMagickPrecision(),coeff[1],
GetMagickPrecision(),coeff[2]);
(void) FormatLocaleFile(stderr,
" yy=(%+.*g*ii %+.*g*jj %+.*g)/rr;\n",
GetMagickPrecision(),coeff[3],GetMagickPrecision(),coeff[4],
GetMagickPrecision(),coeff[5]);
(void) FormatLocaleFile(stderr," rr%s0 ? %s : blue' \\\n",
coeff[8] < 0.0 ? "<" : ">", lookup);
break;
}
case BilinearForwardDistortion:
{
(void) FormatLocaleFile(stderr,"BilinearForward Mapping Equations:\n");
(void) FormatLocaleFile(stderr,"%s", image_gen);
(void) FormatLocaleFile(stderr," i = %+lf*x %+lf*y %+lf*x*y %+lf;\n",
coeff[0],coeff[1],coeff[2],coeff[3]);
(void) FormatLocaleFile(stderr," j = %+lf*x %+lf*y %+lf*x*y %+lf;\n",
coeff[4],coeff[5],coeff[6],coeff[7]);
#if 0
/* for debugging */
(void) FormatLocaleFile(stderr, " c8 = %+lf c9 = 2*a = %+lf;\n",
coeff[8], coeff[9]);
#endif
(void) FormatLocaleFile(stderr,
"BilinearForward Distort, FX Equivelent:\n");
(void) FormatLocaleFile(stderr,"%s", image_gen);
(void) FormatLocaleFile(stderr,
" -fx 'ii=i+page.x%+lf; jj=j+page.y%+lf;\n",0.5-coeff[3],0.5-
coeff[7]);
(void) FormatLocaleFile(stderr," bb=%lf*ii %+lf*jj %+lf;\n",
coeff[6], -coeff[2], coeff[8]);
/* Handle Special degenerate (non-quadratic) or trapezoidal case */
if (coeff[9] != 0)
{
(void) FormatLocaleFile(stderr,
" rt=bb*bb %+lf*(%lf*ii%+lf*jj);\n",-2*coeff[9],coeff[4],
-coeff[0]);
(void) FormatLocaleFile(stderr,
" yy=( -bb + sqrt(rt) ) / %lf;\n",coeff[9]);
}
else
(void) FormatLocaleFile(stderr," yy=(%lf*ii%+lf*jj)/bb;\n",
-coeff[4],coeff[0]);
(void) FormatLocaleFile(stderr,
" xx=(ii %+lf*yy)/(%lf %+lf*yy);\n",-coeff[1],coeff[0],
coeff[2]);
if ( coeff[9] != 0 )
(void) FormatLocaleFile(stderr," (rt < 0 ) ? red : %s'\n",
lookup);
else
(void) FormatLocaleFile(stderr," %s' \\\n", lookup);
break;
}
case BilinearReverseDistortion:
{
#if 0
(void) FormatLocaleFile(stderr, "Polynomial Projection Distort:\n");
(void) FormatLocaleFile(stderr, " -distort PolynomialProjection \\\n");
(void) FormatLocaleFile(stderr, " '1.5, %lf, %lf, %lf, %lf,\n",
coeff[3], coeff[0], coeff[1], coeff[2]);
(void) FormatLocaleFile(stderr, " %lf, %lf, %lf, %lf'\n",
coeff[7], coeff[4], coeff[5], coeff[6]);
#endif
(void) FormatLocaleFile(stderr,
"BilinearReverse Distort, FX Equivelent:\n");
(void) FormatLocaleFile(stderr,"%s", image_gen);
(void) FormatLocaleFile(stderr,
" -fx 'ii=i+page.x+0.5; jj=j+page.y+0.5;\n");
(void) FormatLocaleFile(stderr,
" xx=%+lf*ii %+lf*jj %+lf*ii*jj %+lf;\n",coeff[0],coeff[1],
coeff[2], coeff[3]);
(void) FormatLocaleFile(stderr,
" yy=%+lf*ii %+lf*jj %+lf*ii*jj %+lf;\n",coeff[4],coeff[5],
coeff[6], coeff[7]);
(void) FormatLocaleFile(stderr," %s' \\\n", lookup);
break;
}
case PolynomialDistortion:
{
size_t nterms = (size_t) coeff[1];
(void) FormatLocaleFile(stderr,
"Polynomial (order %lg, terms %lu), FX Equivelent\n",coeff[0],
(unsigned long) nterms);
(void) FormatLocaleFile(stderr,"%s", image_gen);
(void) FormatLocaleFile(stderr,
" -fx 'ii=i+page.x+0.5; jj=j+page.y+0.5;\n");
(void) FormatLocaleFile(stderr, " xx =");
for (i=0; i < (ssize_t) nterms; i++)
{
if ((i != 0) && (i%4 == 0))
(void) FormatLocaleFile(stderr, "\n ");
(void) FormatLocaleFile(stderr," %+lf%s",coeff[2+i],
poly_basis_str(i));
}
(void) FormatLocaleFile(stderr,";\n yy =");
for (i=0; i < (ssize_t) nterms; i++)
{
if ((i != 0) && (i%4 == 0))
(void) FormatLocaleFile(stderr,"\n ");
(void) FormatLocaleFile(stderr," %+lf%s",coeff[2+i+nterms],
poly_basis_str(i));
}
(void) FormatLocaleFile(stderr,";\n %s' \\\n", lookup);
break;
}
case ArcDistortion:
{
(void) FormatLocaleFile(stderr,"Arc Distort, Internal Coefficients:\n");
for (i=0; i < 5; i++)
(void) FormatLocaleFile(stderr,
" c%.20g = %+lf\n",(double) i,coeff[i]);
(void) FormatLocaleFile(stderr,"Arc Distort, FX Equivelent:\n");
(void) FormatLocaleFile(stderr,"%s", image_gen);
(void) FormatLocaleFile(stderr," -fx 'ii=i+page.x; jj=j+page.y;\n");
(void) FormatLocaleFile(stderr," xx=(atan2(jj,ii)%+lf)/(2*pi);\n",
-coeff[0]);
(void) FormatLocaleFile(stderr," xx=xx-round(xx);\n");
(void) FormatLocaleFile(stderr," xx=xx*%lf %+lf;\n",coeff[1],
coeff[4]);
(void) FormatLocaleFile(stderr,
" yy=(%lf - hypot(ii,jj)) * %lf;\n",coeff[2],coeff[3]);
(void) FormatLocaleFile(stderr," v.p{xx-.5,yy-.5}' \\\n");
break;
}
case PolarDistortion:
{
(void) FormatLocaleFile(stderr,"Polar Distort, Internal Coefficents\n");
for (i=0; i < 8; i++)
(void) FormatLocaleFile(stderr," c%.20g = %+lf\n",(double) i,
coeff[i]);
(void) FormatLocaleFile(stderr,"Polar Distort, FX Equivelent:\n");
(void) FormatLocaleFile(stderr,"%s", image_gen);
(void) FormatLocaleFile(stderr,
" -fx 'ii=i+page.x%+lf; jj=j+page.y%+lf;\n",-coeff[2],-coeff[3]);
(void) FormatLocaleFile(stderr," xx=(atan2(ii,jj)%+lf)/(2*pi);\n",
-(coeff[4]+coeff[5])/2 );
(void) FormatLocaleFile(stderr," xx=xx-round(xx);\n");
(void) FormatLocaleFile(stderr," xx=xx*2*pi*%lf + v.w/2;\n",
coeff[6] );
(void) FormatLocaleFile(stderr," yy=(hypot(ii,jj)%+lf)*%lf;\n",
-coeff[1],coeff[7] );
(void) FormatLocaleFile(stderr," v.p{xx-.5,yy-.5}' \\\n");
break;
}
case DePolarDistortion:
{
(void) FormatLocaleFile(stderr,
"DePolar Distort, Internal Coefficents\n");
for (i=0; i < 8; i++)
(void) FormatLocaleFile(stderr," c%.20g = %+lf\n",(double) i,
coeff[i]);
(void) FormatLocaleFile(stderr,"DePolar Distort, FX Equivelent:\n");
(void) FormatLocaleFile(stderr,"%s", image_gen);
(void) FormatLocaleFile(stderr," -fx 'aa=(i+.5)*%lf %+lf;\n",
coeff[6],+coeff[4]);
(void) FormatLocaleFile(stderr," rr=(j+.5)*%lf %+lf;\n",
coeff[7],+coeff[1]);
(void) FormatLocaleFile(stderr," xx=rr*sin(aa) %+lf;\n",
coeff[2]);
(void) FormatLocaleFile(stderr," yy=rr*cos(aa) %+lf;\n",
coeff[3]);
(void) FormatLocaleFile(stderr," v.p{xx-.5,yy-.5}' \\\n");
break;
}
case Cylinder2PlaneDistortion:
{
(void) FormatLocaleFile(stderr,
"Cylinder to Plane Distort, Internal Coefficents\n");
(void) FormatLocaleFile(stderr," cylinder_radius = %+lf\n",coeff[1]);
(void) FormatLocaleFile(stderr,
"Cylinder to Plane Distort, FX Equivelent:\n");
(void) FormatLocaleFile(stderr, "%s", image_gen);
(void) FormatLocaleFile(stderr,
" -fx 'ii=i+page.x%+lf+0.5; jj=j+page.y%+lf+0.5;\n",-coeff[4],
-coeff[5]);
(void) FormatLocaleFile(stderr," aa=atan(ii/%+lf);\n",coeff[1]);
(void) FormatLocaleFile(stderr," xx=%lf*aa%+lf;\n",
coeff[1],coeff[2]);
(void) FormatLocaleFile(stderr," yy=jj*cos(aa)%+lf;\n",coeff[3]);
(void) FormatLocaleFile(stderr," %s' \\\n", lookup);
break;
}
case Plane2CylinderDistortion:
{
(void) FormatLocaleFile(stderr,
"Plane to Cylinder Distort, Internal Coefficents\n");
(void) FormatLocaleFile(stderr," cylinder_radius = %+lf\n",coeff[1]);
(void) FormatLocaleFile(stderr,
"Plane to Cylinder Distort, FX Equivelent:\n");
(void) FormatLocaleFile(stderr,"%s", image_gen);
(void) FormatLocaleFile(stderr,
" -fx 'ii=i+page.x%+lf+0.5; jj=j+page.y%+lf+0.5;\n",-coeff[4],
-coeff[5]);
(void) FormatLocaleFile(stderr," ii=ii/%+lf;\n",coeff[1]);
(void) FormatLocaleFile(stderr," xx=%lf*tan(ii)%+lf;\n",coeff[1],
coeff[2] );
(void) FormatLocaleFile(stderr," yy=jj/cos(ii)%+lf;\n",coeff[3]);
(void) FormatLocaleFile(stderr," %s' \\\n", lookup);
break;
}
case BarrelDistortion:
case BarrelInverseDistortion:
{
double
xc,
yc;
/*
NOTE: This does the barrel roll in pixel coords not image coords
The internal distortion must do it in image coordinates, so that is
what the center coeff (8,9) is given in.
*/
xc=((double)image->columns-1.0)/2.0+image->page.x;
yc=((double)image->rows-1.0)/2.0+image->page.y;
(void) FormatLocaleFile(stderr, "Barrel%s Distort, FX Equivelent:\n",
method == BarrelDistortion ? "" : "Inv");
(void) FormatLocaleFile(stderr, "%s", image_gen);
if ( fabs(coeff[8]-xc-0.5) < 0.1 && fabs(coeff[9]-yc-0.5) < 0.1 )
(void) FormatLocaleFile(stderr," -fx 'xc=(w-1)/2; yc=(h-1)/2;\n");
else
(void) FormatLocaleFile(stderr," -fx 'xc=%lf; yc=%lf;\n",coeff[8]-
0.5,coeff[9]-0.5);
(void) FormatLocaleFile(stderr,
" ii=i-xc; jj=j-yc; rr=hypot(ii,jj);\n");
(void) FormatLocaleFile(stderr,
" ii=ii%s(%lf*rr*rr*rr %+lf*rr*rr %+lf*rr %+lf);\n",
method == BarrelDistortion ? "*" : "/",coeff[0],coeff[1],coeff[2],
coeff[3]);
(void) FormatLocaleFile(stderr,
" jj=jj%s(%lf*rr*rr*rr %+lf*rr*rr %+lf*rr %+lf);\n",
method == BarrelDistortion ? "*" : "/",coeff[4],coeff[5],coeff[6],
coeff[7]);
(void) FormatLocaleFile(stderr," v.p{fx*ii+xc,fy*jj+yc}' \\\n");
break;
}
default:
break;
}
}
/*
A user-provided 'scale' expert option will scale the output image size
by the factor given, allowing for super-sampling of the distorted image
space. Any scaling factors must naturally be halved as a result.
*/
{ const char *artifact;
artifact=GetImageArtifact(image,"distort:scale");
output_scaling = 1.0;
if (artifact != (const char *) NULL) {
output_scaling = fabs(StringToDouble(artifact,(char **) NULL));
geometry.width=(size_t) (output_scaling*geometry.width+0.5);
geometry.height=(size_t) (output_scaling*geometry.height+0.5);
geometry.x=(ssize_t) (output_scaling*geometry.x+0.5);
geometry.y=(ssize_t) (output_scaling*geometry.y+0.5);
if ( output_scaling < 0.1 ) {
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"InvalidArgument","%s","-define distort:scale" );
return((Image *) NULL);
}
output_scaling = 1/output_scaling;
}
}
#define ScaleFilter(F,A,B,C,D) \
ScaleResampleFilter( (F), \
output_scaling*(A), output_scaling*(B), \
output_scaling*(C), output_scaling*(D) )
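/* For example, an identity mapping rendered with output_scaling == 0.5
(a 2x super-sampled canvas) would call ScaleFilter(f,1,0,0,1), and the
resampling ellipse would in effect cover half a source pixel per
destination pixel, as expected for an enlarged output canvas. */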
/*
Initialize the distort image attributes.
*/
distort_image=CloneImage(image,geometry.width,geometry.height,MagickTrue,
exception);
if (distort_image == (Image *) NULL)
{
coeff=(double *) RelinquishMagickMemory(coeff);
return((Image *) NULL);
}
/* if image is ColorMapped - change it to DirectClass */
if (SetImageStorageClass(distort_image,DirectClass) == MagickFalse)
{
coeff=(double *) RelinquishMagickMemory(coeff);
InheritException(exception,&distort_image->exception);
distort_image=DestroyImage(distort_image);
return((Image *) NULL);
}
if ((IsPixelGray(&distort_image->background_color) == MagickFalse) &&
(IsGrayColorspace(distort_image->colorspace) != MagickFalse))
(void) SetImageColorspace(distort_image,sRGBColorspace);
if (distort_image->background_color.opacity != OpaqueOpacity)
distort_image->matte=MagickTrue;
distort_image->page.x=geometry.x;
distort_image->page.y=geometry.y;
{ /* ----- MAIN CODE -----
Sample the source image to each pixel in the distort image.
*/
CacheView
*distort_view;
MagickBooleanType
status;
MagickOffsetType
progress;
MagickPixelPacket
zero;
ResampleFilter
**magick_restrict resample_filter;
ssize_t
j;
status=MagickTrue;
progress=0;
GetMagickPixelPacket(distort_image,&zero);
resample_filter=AcquireResampleFilterThreadSet(image,
UndefinedVirtualPixelMethod,MagickFalse,exception);
distort_view=AcquireAuthenticCacheView(distort_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,distort_image,distort_image->rows,1)
#endif
for (j=0; j < (ssize_t) distort_image->rows; j++)
{
const int
id = GetOpenMPThreadId();
double
validity; /* how mathematically valid this mapping is */
MagickBooleanType
sync;
MagickPixelPacket
pixel, /* pixel color to assign to distorted image */
invalid; /* the color to assign when distort result is invalid */
PointInfo
d,
s; /* transform destination image x,y to source image x,y */
IndexPacket
*magick_restrict indexes;
ssize_t
i;
PixelPacket
*magick_restrict q;
q=QueueCacheViewAuthenticPixels(distort_view,0,j,distort_image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewAuthenticIndexQueue(distort_view);
pixel=zero;
/* Define constant scaling vectors for Affine Distortions
Other methods are either variable, or use interpolated lookup
*/
switch (method)
{
case AffineDistortion:
ScaleFilter( resample_filter[id],
coeff[0], coeff[1],
coeff[3], coeff[4] );
break;
default:
break;
}
/* Initialize default pixel validity
* negative: pixel is invalid, output 'matte_color'
* 0.0 to 1.0: antialiased, mix with resample output
* 1.0 or greater: use resampled output.
*/
validity = 1.0;
GetMagickPixelPacket(distort_image,&invalid);
SetMagickPixelPacket(distort_image,&distort_image->matte_color,
(IndexPacket *) NULL, &invalid);
if (distort_image->colorspace == CMYKColorspace)
ConvertRGBToCMYK(&invalid); /* what about other color spaces? */
for (i=0; i < (ssize_t) distort_image->columns; i++)
{
/* map pixel coordinate to distortion space coordinate */
d.x = (double) (geometry.x+i+0.5)*output_scaling;
d.y = (double) (geometry.y+j+0.5)*output_scaling;
s = d; /* default is a no-op mapping */
switch (method)
{
case AffineDistortion:
{
s.x=coeff[0]*d.x+coeff[1]*d.y+coeff[2];
s.y=coeff[3]*d.x+coeff[4]*d.y+coeff[5];
/* Affine partial derivatives are constant -- set above */
break;
}
case PerspectiveDistortion:
{
double
p,q,r,abs_r,abs_c6,abs_c7,scale;
/* perspective is a ratio of affines */
p=coeff[0]*d.x+coeff[1]*d.y+coeff[2];
q=coeff[3]*d.x+coeff[4]*d.y+coeff[5];
r=coeff[6]*d.x+coeff[7]*d.y+1.0;
/* Pixel Validity -- is it a 'sky' or 'ground' pixel */
validity = (r*coeff[8] < 0.0) ? 0.0 : 1.0;
/* Determine horizon anti-alias blending */
abs_r = fabs(r)*2;
abs_c6 = fabs(coeff[6]);
abs_c7 = fabs(coeff[7]);
if ( abs_c6 > abs_c7 ) {
if ( abs_r < abs_c6*output_scaling )
validity = 0.5 - coeff[8]*r/(coeff[6]*output_scaling);
}
else if ( abs_r < abs_c7*output_scaling )
validity = 0.5 - coeff[8]*r/(coeff[7]*output_scaling);
/* Perspective Sampling Point (if valid) */
if ( validity > 0.0 ) {
/* divide by r affine, for perspective scaling */
scale = 1.0/r;
s.x = p*scale;
s.y = q*scale;
/* Perspective Partial Derivatives or Scaling Vectors */
scale *= scale;
ScaleFilter( resample_filter[id],
(r*coeff[0] - p*coeff[6])*scale,
(r*coeff[1] - p*coeff[7])*scale,
(r*coeff[3] - q*coeff[6])*scale,
(r*coeff[4] - q*coeff[7])*scale );
}
break;
}
case BilinearReverseDistortion:
{
/* Reversed Mapped is just a simple polynomial */
s.x=coeff[0]*d.x+coeff[1]*d.y+coeff[2]*d.x*d.y+coeff[3];
s.y=coeff[4]*d.x+coeff[5]*d.y
+coeff[6]*d.x*d.y+coeff[7];
/* Bilinear partial derivatives for the scaling vectors */
ScaleFilter( resample_filter[id],
coeff[0] + coeff[2]*d.y,
coeff[1] + coeff[2]*d.x,
coeff[4] + coeff[6]*d.y,
coeff[5] + coeff[6]*d.x );
break;
}
case BilinearForwardDistortion:
{
/* Forward mapped needs reversed polynomial equations
* which unfortunately requires a square root! */
double b,c;
d.x -= coeff[3]; d.y -= coeff[7];
b = coeff[6]*d.x - coeff[2]*d.y + coeff[8];
c = coeff[4]*d.x - coeff[0]*d.y;
validity = 1.0;
/* Handle Special degenerate (non-quadratic) case
* Currently without horizon anti-aliasing */
if ( fabs(coeff[9]) < MagickEpsilon )
s.y = -c/b;
else {
c = b*b - 2*coeff[9]*c;
if ( c < 0.0 )
validity = 0.0;
else
s.y = ( -b + sqrt(c) )/coeff[9];
}
if ( validity > 0.0 )
s.x = ( d.x - coeff[1]*s.y) / ( coeff[0] + coeff[2]*s.y );
/* NOTE: the sign of the square root should be -ve for parts
where the source image becomes 'flipped' or 'mirrored'.
FUTURE: Horizon handling
FUTURE: Scaling factors or Derivatives (how?)
*/
break;
}
#if 0
case BilinearDistortion:
/* Bilinear mapping of any Quadrilateral to any Quadrilateral */
/* UNDER DEVELOPMENT */
break;
#endif
case PolynomialDistortion:
{
/* multi-ordered polynomial */
ssize_t
k;
ssize_t
nterms=(ssize_t)coeff[1];
PointInfo
du,dv; /* the du,dv vectors from unit dx,dy -- derivatives */
s.x=s.y=du.x=du.y=dv.x=dv.y=0.0;
for(k=0; k < nterms; k++) {
s.x += poly_basis_fn(k,d.x,d.y)*coeff[2+k];
du.x += poly_basis_dx(k,d.x,d.y)*coeff[2+k];
du.y += poly_basis_dy(k,d.x,d.y)*coeff[2+k];
s.y += poly_basis_fn(k,d.x,d.y)*coeff[2+k+nterms];
dv.x += poly_basis_dx(k,d.x,d.y)*coeff[2+k+nterms];
dv.y += poly_basis_dy(k,d.x,d.y)*coeff[2+k+nterms];
}
ScaleFilter( resample_filter[id], du.x,du.y,dv.x,dv.y );
break;
}
case ArcDistortion:
{
/* what is the angle and radius in the destination image */
s.x = (double) ((atan2(d.y,d.x) - coeff[0])/Magick2PI);
s.x -= MagickRound(s.x); /* angle */
s.y = hypot(d.x,d.y); /* radius */
/* Arc Distortion Partial Scaling Vectors
Are derived by mapping the perpendicular unit vectors
dR and dA*R*2PI rather than trying to map dx and dy.
The result is a very simple orthogonally aligned ellipse.
*/
if ( s.y > MagickEpsilon )
ScaleFilter( resample_filter[id],
(double) (coeff[1]/(Magick2PI*s.y)), 0, 0, coeff[3] );
else
ScaleFilter( resample_filter[id],
distort_image->columns*2, 0, 0, coeff[3] );
/* now scale the angle and radius for source image lookup point */
s.x = s.x*coeff[1] + coeff[4] + image->page.x +0.5;
s.y = (coeff[2] - s.y) * coeff[3] + image->page.y;
break;
}
case PolarDistortion:
{ /* 2D Cartesian to Polar View */
d.x -= coeff[2];
d.y -= coeff[3];
s.x = atan2(d.x,d.y) - (coeff[4]+coeff[5])/2;
s.x /= Magick2PI;
s.x -= MagickRound(s.x);
s.x *= Magick2PI; /* angle - relative to centerline */
s.y = hypot(d.x,d.y); /* radius */
/* Polar Scaling vectors are based on mapping dR and dA vectors
This results in very simple orthogonal scaling vectors
*/
if ( s.y > MagickEpsilon )
ScaleFilter( resample_filter[id],
(double) (coeff[6]/(Magick2PI*s.y)), 0, 0, coeff[7] );
else
ScaleFilter( resample_filter[id],
distort_image->columns*2, 0, 0, coeff[7] );
/* now finish mapping radius/angle to source x,y coords */
s.x = s.x*coeff[6] + (double)image->columns/2.0 + image->page.x;
s.y = (s.y-coeff[1])*coeff[7] + image->page.y;
break;
}
case DePolarDistortion:
{ /* 2D Polar to Cartesian */
/* ignore all destination virtual offsets */
d.x = ((double)i+0.5)*output_scaling*coeff[6]+coeff[4];
d.y = ((double)j+0.5)*output_scaling*coeff[7]+coeff[1];
s.x = d.y*sin(d.x) + coeff[2];
s.y = d.y*cos(d.x) + coeff[3];
/* derivatives are useless - better to use SuperSampling */
break;
}
case Cylinder2PlaneDistortion:
{ /* 3D Cylinder to Tangential Plane */
double ax, cx;
/* relative to center of distortion */
d.x -= coeff[4]; d.y -= coeff[5];
d.x /= coeff[1]; /* x' = x/r */
ax=atan(d.x); /* aa = atan(x/r) = u/r */
cx=cos(ax); /* cx = cos(atan(x/r)) = 1/sqrt(x^2+u^2) */
s.x = coeff[1]*ax; /* u = r*atan(x/r) */
s.y = d.y*cx; /* v = y*cos(u/r) */
/* derivatives... (see personal notes) */
ScaleFilter( resample_filter[id],
1.0/(1.0+d.x*d.x), 0.0, -d.x*s.y*cx*cx/coeff[1], s.y/d.y );
#if 0
if ( i == 0 && j == 0 ) {
fprintf(stderr, "x=%lf y=%lf u=%lf v=%lf\n", d.x*coeff[1], d.y, s.x, s.y);
fprintf(stderr, "phi = %lf\n", (double)(ax * 180.0/MagickPI) );
fprintf(stderr, "du/dx=%lf du/dx=%lf dv/dx=%lf dv/dy=%lf\n",
1.0/(1.0+d.x*d.x), 0.0, -d.x*s.y*cx*cx/coeff[1], s.y/d.y );
fflush(stderr); }
#endif
/* add center of distortion in source */
s.x += coeff[2]; s.y += coeff[3];
break;
}
case Plane2CylinderDistortion:
{ /* 3D Tangential Plane to Cylinder */
/* relative to center of distortion */
d.x -= coeff[4]; d.y -= coeff[5];
/* is pixel valid - horizon of an infinite Virtual-Pixel Plane
* (see Anthony Thyssen's personal note) */
validity = (double) ((coeff[1]*MagickPI2 - fabs(d.x))/output_scaling + 0.5);
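/* i.e. the pixel is valid while |d.x| < coeff[1]*pi/2 (inside the
   cylinder's horizon); dividing the remaining distance by
   output_scaling makes validity fade from 1 to 0 over roughly one
   output pixel, anti-aliasing the horizon edge. */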
if ( validity > 0.0 ) {
double cx,tx;
d.x /= coeff[1]; /* x'= x/r */
cx = 1/cos(d.x); /* cx = 1/cos(x/r) */
tx = tan(d.x); /* tx = tan(x/r) */
s.x = coeff[1]*tx; /* u = r * tan(x/r) */
s.y = d.y*cx; /* v = y / cos(x/r) */
/* derivatives... (see Anthony Thyssen's personal notes) */
ScaleFilter( resample_filter[id],
cx*cx, 0.0, s.y*cx/coeff[1], cx );
#if 0 /* debug output disabled; the exact float test below almost never fires */
/*if ( i == 0 && j == 0 ) {*/
if ( d.x == 0.5 && d.y == 0.5 ) {
fprintf(stderr, "x=%lf y=%lf u=%lf v=%lf\n", d.x*coeff[1], d.y, s.x, s.y);
fprintf(stderr, "radius = %lf phi = %lf validity = %lf\n",
coeff[1], (double)(d.x * 180.0/MagickPI), validity );
fprintf(stderr, "du/dx=%lf du/dx=%lf dv/dx=%lf dv/dy=%lf\n",
cx*cx, 0.0, s.y*cx/coeff[1], cx);
fflush(stderr); }
#endif
}
/* add center of distortion in source */
s.x += coeff[2]; s.y += coeff[3];
break;
}
case BarrelDistortion:
case BarrelInverseDistortion:
{ /* Lens Barrel Distortion Correction */
double r,fx,fy,gx,gy;
/* Radial Polynomial Distortion (de-normalized) */
d.x -= coeff[8];
d.y -= coeff[9];
r = sqrt(d.x*d.x+d.y*d.y);
if ( r > MagickEpsilon ) {
fx = ((coeff[0]*r + coeff[1])*r + coeff[2])*r + coeff[3];
fy = ((coeff[4]*r + coeff[5])*r + coeff[6])*r + coeff[7];
gx = ((3*coeff[0]*r + 2*coeff[1])*r + coeff[2])/r;
gy = ((3*coeff[4]*r + 2*coeff[5])*r + coeff[6])/r;
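/* Here fx is the Horner form of the radial polynomial
     fx(r) = A*r^3 + B*r^2 + C*r + D   (A..D = coeff[0..3])
   and gx = fx'(r)/r, so the ScaleFilter call below is the exact
   Jacobian of s = d*fx(|d|):  ds.x/dd.x = fx + gx*d.x*d.x,
   ds.x/dd.y = gx*d.x*d.y, and so on. */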
/* adjust functions and scaling for 'inverse' form */
if ( method == BarrelInverseDistortion ) {
fx = 1/fx; fy = 1/fy;
gx *= -fx*fx; gy *= -fy*fy;
}
/* Set the source pixel to lookup and EWA derivative vectors */
s.x = d.x*fx + coeff[8];
s.y = d.y*fy + coeff[9];
ScaleFilter( resample_filter[id],
gx*d.x*d.x + fx, gx*d.x*d.y,
gy*d.x*d.y, gy*d.y*d.y + fy );
}
else {
/* Special handling to avoid divide by zero when r==0
**
** The source and destination pixels match in this case
** which was set at the top of the loop using s = d;
** otherwise... s.x=coeff[8]; s.y=coeff[9];
*/
if ( method == BarrelDistortion )
ScaleFilter( resample_filter[id],
coeff[3], 0, 0, coeff[7] );
else /* method == BarrelInverseDistortion */
/* FUTURE, trap for D==0 causing division by zero */
ScaleFilter( resample_filter[id],
1.0/coeff[3], 0, 0, 1.0/coeff[7] );
}
break;
}
case ShepardsDistortion:
{ /* Shepard's Method, or Inverse Distance Weighting, for
displacement around the destination image control points.
The input arguments are the coefficients to the function.
This is more of a 'displacement' function than an
absolute distortion function.
Note: we cannot determine derivatives using Shepard's method,
so only a point-sample interpolation can be used.
*/
size_t
i;
double
denominator;
denominator = s.x = s.y = 0;
for(i=0; i<number_arguments; i+=4) {
double weight =
((double)d.x-arguments[i+2])*((double)d.x-arguments[i+2])
+ ((double)d.y-arguments[i+3])*((double)d.y-arguments[i+3]);
weight = pow(weight,coeff[0]); /* Shepard's power factor */
weight = ( weight < 1.0 ) ? 1.0 : 1.0/weight;
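/* Since 'weight' holds the *squared* distance, coeff[0] is half the
   effective power: coeff[0] == 1.0 gives classic inverse-squared
   weighting, coeff[0] == 0.5 plain inverse-distance weighting
   (compare the coeff[0]=0.5 adjustment in SparseColorImage below). */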
s.x += (arguments[ i ]-arguments[i+2])*weight;
s.y += (arguments[i+1]-arguments[i+3])*weight;
denominator += weight;
}
s.x /= denominator;
s.y /= denominator;
s.x += d.x; /* apply as a relative displacement */
s.y += d.y;
break;
}
default:
break; /* use the default no-op given above */
}
/* map virtual canvas location back to real image coordinate */
if ( bestfit && method != ArcDistortion ) {
s.x -= image->page.x;
s.y -= image->page.y;
}
s.x -= 0.5;
s.y -= 0.5;
if ( validity <= 0.0 ) {
/* result of distortion is an invalid pixel - don't resample */
SetPixelPacket(distort_image,&invalid,q,indexes);
}
else {
/* resample the source image to find its correct color */
(void) ResamplePixelColor(resample_filter[id],s.x,s.y,&pixel);
/* if validity between 0.0 and 1.0 mix result with invalid pixel */
if ( validity < 1.0 ) {
/* Do a blend of sample color and invalid pixel */
/* should this be a 'Blend', or an 'Over' compose */
MagickPixelCompositeBlend(&pixel,validity,&invalid,(1.0-validity),
&pixel);
}
SetPixelPacket(distort_image,&pixel,q,indexes);
}
q++;
indexes++;
}
sync=SyncCacheViewAuthenticPixels(distort_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,DistortImageTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
distort_view=DestroyCacheView(distort_view);
resample_filter=DestroyResampleFilterThreadSet(resample_filter);
if (status == MagickFalse)
distort_image=DestroyImage(distort_image);
}
/* Arc does not return an offset unless 'bestfit' is in effect
and the user has not provided an overriding 'viewport'.
*/
if ( method == ArcDistortion && !bestfit && !viewport_given ) {
distort_image->page.x = 0;
distort_image->page.y = 0;
}
coeff=(double *) RelinquishMagickMemory(coeff);
return(distort_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R o t a t e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RotateImage() creates a new image that is a rotated copy of an existing
% one. Positive angles rotate counter-clockwise (right-hand rule), while
% negative angles rotate clockwise. Rotated images are usually larger than
% the originals and have 'empty' triangular corners.  Empty triangles
% left over from shearing the image are filled with the background
% color defined by member 'background_color' of the image. RotateImage
% allocates the memory necessary for the new Image structure and returns a
% pointer to the new image.
%
% The format of the RotateImage method is:
%
% Image *RotateImage(const Image *image,const double degrees,
% ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o degrees: Specifies the number of degrees to rotate the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *RotateImage(const Image *image,const double degrees,
ExceptionInfo *exception)
{
Image
*distort_image,
*rotate_image;
MagickRealType
angle;
PointInfo
shear;
size_t
rotations;
/*
Adjust rotation angle.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
angle=fmod(degrees,360.0);
while (angle < -45.0)
angle+=360.0;
for (rotations=0; angle > 45.0; rotations++)
angle-=90.0;
rotations%=4;
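/* Worked example: degrees = 100 -> angle = fmod(100,360) = 100; the
   loop leaves angle = 10 with rotations = 1.  Because the residual 10
   degrees gives a non-trivial shear, the full 100 degrees is handed to
   DistortImage below; only when the residual angle is ~0 (e.g.
   degrees = 90 -> angle = 0, rotations = 1) does the fast
   IntegralRotateImage path run. */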
shear.x=(-tan((double) DegreesToRadians(angle)/2.0));
shear.y=sin((double) DegreesToRadians(angle));
if ((fabs(shear.x) < MagickEpsilon) && (fabs(shear.y) < MagickEpsilon))
return(IntegralRotateImage(image,rotations,exception));
distort_image=CloneImage(image,0,0,MagickTrue,exception);
if (distort_image == (Image *) NULL)
return((Image *) NULL);
(void) SetImageVirtualPixelMethod(distort_image,BackgroundVirtualPixelMethod);
rotate_image=DistortImage(distort_image,ScaleRotateTranslateDistortion,1,
°rees,MagickTrue,exception);
distort_image=DestroyImage(distort_image);
return(rotate_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S p a r s e C o l o r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SparseColorImage(), given a set of coordinates, interpolates the colors
% found at those coordinates, across the whole image, using various methods.
%
% The format of the SparseColorImage() method is:
%
% Image *SparseColorImage(const Image *image,const ChannelType channel,
% const SparseColorMethod method,const size_t number_arguments,
% const double *arguments,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image to be filled in.
%
% o channel: Specify which color values (in RGBKA sequence) are being set.
% This also determines the number of color_values in above.
%
% o method: the method to fill in the gradient between the control points.
%
% The methods used for SparseColor() are often similar to methods
% used for DistortImage(), and even share the same code for determination
% of the function coefficients, though with more dimensions (or resulting
% values).
%
% o number_arguments: the number of arguments given.
%
% o arguments: array of floating point arguments for this method--
% x,y,color_values-- with color_values given as normalized values.
%
% o exception: return any errors or warnings in this structure
%
*/
MagickExport Image *SparseColorImage(const Image *image,
const ChannelType channel,const SparseColorMethod method,
const size_t number_arguments,const double *arguments,
ExceptionInfo *exception)
{
#define SparseColorTag "Distort/SparseColor"
SparseColorMethod
sparse_method;
double
*coeff;
Image
*sparse_image;
size_t
number_colors;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
/* Determine number of color values needed per control point */
number_colors=0;
if ( channel & RedChannel ) number_colors++;
if ( channel & GreenChannel ) number_colors++;
if ( channel & BlueChannel ) number_colors++;
if ( channel & IndexChannel ) number_colors++;
if ( channel & OpacityChannel ) number_colors++;
/*
Convert input arguments into mapping coefficients; in this case
we are mapping (distorting) colors, rather than coordinates.
*/
{ DistortImageMethod
distort_method;
distort_method=(DistortImageMethod) method;
if ( distort_method >= SentinelDistortion )
distort_method = ShepardsDistortion; /* Pretend to be Shepards */
coeff = GenerateCoefficients(image, &distort_method, number_arguments,
arguments, number_colors, exception);
if ( coeff == (double *) NULL )
return((Image *) NULL);
/*
Note some distort methods may fall back to other simpler methods.
Currently the only fallback of concern is Bilinear to Affine
(Barycentric), which is also a sparse color method.  This also
ensures correct two- and one-color Barycentric handling.
*/
sparse_method = (SparseColorMethod) distort_method;
if ( distort_method == ShepardsDistortion )
sparse_method = method; /* return non-distort methods to normal */
if ( sparse_method == InverseColorInterpolate )
coeff[0]=0.5; /* sqrt() the squared distance for inverse */
}
/* Verbose output */
if ( GetImageArtifact(image,"verbose") != (const char *) NULL ) {
switch (sparse_method) {
case BarycentricColorInterpolate:
{
ssize_t x=0;
(void) FormatLocaleFile(stderr, "Barycentric Sparse Color:\n");
if ( channel & RedChannel )
(void) FormatLocaleFile(stderr, " -channel R -fx '%+lf*i %+lf*j %+lf' \\\n",
coeff[x], coeff[x+1], coeff[x+2]),x+=3;
if ( channel & GreenChannel )
(void) FormatLocaleFile(stderr, " -channel G -fx '%+lf*i %+lf*j %+lf' \\\n",
coeff[x], coeff[x+1], coeff[x+2]),x+=3;
if ( channel & BlueChannel )
(void) FormatLocaleFile(stderr, " -channel B -fx '%+lf*i %+lf*j %+lf' \\\n",
coeff[x], coeff[x+1], coeff[x+2]),x+=3;
if ( channel & IndexChannel )
(void) FormatLocaleFile(stderr, " -channel K -fx '%+lf*i %+lf*j %+lf' \\\n",
coeff[x], coeff[x+1], coeff[x+2]),x+=3;
if ( channel & OpacityChannel )
(void) FormatLocaleFile(stderr, " -channel A -fx '%+lf*i %+lf*j %+lf' \\\n",
coeff[x], coeff[x+1], coeff[x+2]),x+=3;
break;
}
case BilinearColorInterpolate:
{
ssize_t x=0;
(void) FormatLocaleFile(stderr, "Bilinear Sparse Color\n");
if ( channel & RedChannel )
(void) FormatLocaleFile(stderr, " -channel R -fx '%+lf*i %+lf*j %+lf*i*j %+lf;\n",
coeff[ x ], coeff[x+1],
coeff[x+2], coeff[x+3]),x+=4;
if ( channel & GreenChannel )
(void) FormatLocaleFile(stderr, " -channel G -fx '%+lf*i %+lf*j %+lf*i*j %+lf;\n",
coeff[ x ], coeff[x+1],
coeff[x+2], coeff[x+3]),x+=4;
if ( channel & BlueChannel )
(void) FormatLocaleFile(stderr, " -channel B -fx '%+lf*i %+lf*j %+lf*i*j %+lf;\n",
coeff[ x ], coeff[x+1],
coeff[x+2], coeff[x+3]),x+=4;
if ( channel & IndexChannel )
(void) FormatLocaleFile(stderr, " -channel K -fx '%+lf*i %+lf*j %+lf*i*j %+lf;\n",
coeff[ x ], coeff[x+1],
coeff[x+2], coeff[x+3]),x+=4;
if ( channel & OpacityChannel )
(void) FormatLocaleFile(stderr, " -channel A -fx '%+lf*i %+lf*j %+lf*i*j %+lf;\n",
coeff[ x ], coeff[x+1],
coeff[x+2], coeff[x+3]),x+=4;
break;
}
default:
/* sparse color method is too complex for FX emulation */
break;
}
}
/* Generate a new image for the interpolated gradient.
* ASIDE: we could have just replaced the colors of the original
* image, but IM Core policy is: if the storage class could change,
* then clone the image.
*/
sparse_image=CloneImage(image,0,0,MagickTrue,exception);
if (sparse_image == (Image *) NULL)
return((Image *) NULL);
if (SetImageStorageClass(sparse_image,DirectClass) == MagickFalse)
{ /* if image is ColorMapped - change it to DirectClass */
InheritException(exception,&image->exception);
sparse_image=DestroyImage(sparse_image);
return((Image *) NULL);
}
{ /* ----- MAIN CODE ----- */
CacheView
*sparse_view;
MagickBooleanType
status;
MagickOffsetType
progress;
ssize_t
j;
status=MagickTrue;
progress=0;
sparse_view=AcquireAuthenticCacheView(sparse_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,sparse_image,sparse_image->rows,1)
#endif
for (j=0; j < (ssize_t) sparse_image->rows; j++)
{
MagickBooleanType
sync;
MagickPixelPacket
pixel; /* pixel to assign to sparse image */
IndexPacket
*magick_restrict indexes;
ssize_t
i;
PixelPacket
*magick_restrict q;
q=GetCacheViewAuthenticPixels(sparse_view,0,j,sparse_image->columns,
1,exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewAuthenticIndexQueue(sparse_view);
GetMagickPixelPacket(sparse_image,&pixel);
for (i=0; i < (ssize_t) image->columns; i++)
{
SetMagickPixelPacket(image,q,indexes,&pixel);
switch (sparse_method)
{
case BarycentricColorInterpolate:
{
ssize_t x=0;
if ( channel & RedChannel )
pixel.red = coeff[x]*i +coeff[x+1]*j
+coeff[x+2], x+=3;
if ( channel & GreenChannel )
pixel.green = coeff[x]*i +coeff[x+1]*j
+coeff[x+2], x+=3;
if ( channel & BlueChannel )
pixel.blue = coeff[x]*i +coeff[x+1]*j
+coeff[x+2], x+=3;
if ( channel & IndexChannel )
pixel.index = coeff[x]*i +coeff[x+1]*j
+coeff[x+2], x+=3;
if ( channel & OpacityChannel )
pixel.opacity = coeff[x]*i +coeff[x+1]*j
+coeff[x+2], x+=3;
break;
}
case BilinearColorInterpolate:
{
ssize_t x=0;
if ( channel & RedChannel )
pixel.red = coeff[x]*i + coeff[x+1]*j +
coeff[x+2]*i*j + coeff[x+3], x+=4;
if ( channel & GreenChannel )
pixel.green = coeff[x]*i + coeff[x+1]*j +
coeff[x+2]*i*j + coeff[x+3], x+=4;
if ( channel & BlueChannel )
pixel.blue = coeff[x]*i + coeff[x+1]*j +
coeff[x+2]*i*j + coeff[x+3], x+=4;
if ( channel & IndexChannel )
pixel.index = coeff[x]*i + coeff[x+1]*j +
coeff[x+2]*i*j + coeff[x+3], x+=4;
if ( channel & OpacityChannel )
pixel.opacity = coeff[x]*i + coeff[x+1]*j +
coeff[x+2]*i*j + coeff[x+3], x+=4;
break;
}
case InverseColorInterpolate:
case ShepardsColorInterpolate:
{ /* Inverse (Squared) Distance weights average (IDW) */
size_t
k;
double
denominator;
if ( channel & RedChannel ) pixel.red = 0.0;
if ( channel & GreenChannel ) pixel.green = 0.0;
if ( channel & BlueChannel ) pixel.blue = 0.0;
if ( channel & IndexChannel ) pixel.index = 0.0;
if ( channel & OpacityChannel ) pixel.opacity = 0.0;
denominator = 0.0;
for(k=0; k<number_arguments; k+=2+number_colors) {
ssize_t x=(ssize_t) k+2;
double weight =
((double)i-arguments[ k ])*((double)i-arguments[ k ])
+ ((double)j-arguments[k+1])*((double)j-arguments[k+1]);
weight = pow(weight,coeff[0]); /* inverse of power factor */
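/* 'weight' holds the squared distance; for InverseColorInterpolate
   coeff[0] was forced to 0.5 above, so this is plain 1/distance
   weighting, while the Shepard's default (set by GenerateCoefficients,
   presumably 1.0) yields inverse-squared weighting. */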
weight = ( weight < 1.0 ) ? 1.0 : 1.0/weight;
if ( channel & RedChannel )
pixel.red += arguments[x++]*weight;
if ( channel & GreenChannel )
pixel.green += arguments[x++]*weight;
if ( channel & BlueChannel )
pixel.blue += arguments[x++]*weight;
if ( channel & IndexChannel )
pixel.index += arguments[x++]*weight;
if ( channel & OpacityChannel )
pixel.opacity += arguments[x++]*weight;
denominator += weight;
}
if ( channel & RedChannel ) pixel.red /= denominator;
if ( channel & GreenChannel ) pixel.green /= denominator;
if ( channel & BlueChannel ) pixel.blue /= denominator;
if ( channel & IndexChannel ) pixel.index /= denominator;
if ( channel & OpacityChannel ) pixel.opacity /= denominator;
break;
}
case ManhattanColorInterpolate:
{
size_t
k;
double
minimum = MagickMaximumValue;
/*
Just use the closest control point you can find!
*/
for(k=0; k<number_arguments; k+=2+number_colors) {
double distance =
fabs((double)i-arguments[ k ])
+ fabs((double)j-arguments[k+1]);
if ( distance < minimum ) {
ssize_t x=(ssize_t) k+2;
if ( channel & RedChannel ) pixel.red = arguments[x++];
if ( channel & GreenChannel ) pixel.green = arguments[x++];
if ( channel & BlueChannel ) pixel.blue = arguments[x++];
if ( channel & IndexChannel ) pixel.index = arguments[x++];
if ( channel & OpacityChannel ) pixel.opacity = arguments[x++];
minimum = distance;
}
}
break;
}
case VoronoiColorInterpolate:
default:
{
size_t
k;
double
minimum = MagickMaximumValue;
/*
Just use the closest control point you can find!
*/
for(k=0; k<number_arguments; k+=2+number_colors) {
double distance =
((double)i-arguments[ k ])*((double)i-arguments[ k ])
+ ((double)j-arguments[k+1])*((double)j-arguments[k+1]);
if ( distance < minimum ) {
ssize_t x=(ssize_t) k+2;
if ( channel & RedChannel ) pixel.red = arguments[x++];
if ( channel & GreenChannel ) pixel.green = arguments[x++];
if ( channel & BlueChannel ) pixel.blue = arguments[x++];
if ( channel & IndexChannel ) pixel.index = arguments[x++];
if ( channel & OpacityChannel ) pixel.opacity = arguments[x++];
minimum = distance;
}
}
break;
}
}
/* set the color directly back into the source image */
if ( channel & RedChannel )
pixel.red=ClampPixel(QuantumRange*pixel.red);
if ( channel & GreenChannel )
pixel.green=ClampPixel(QuantumRange*pixel.green);
if ( channel & BlueChannel )
pixel.blue=ClampPixel(QuantumRange*pixel.blue);
if ( channel & IndexChannel )
pixel.index=ClampPixel(QuantumRange*pixel.index);
if ( channel & OpacityChannel )
pixel.opacity=ClampPixel(QuantumRange*pixel.opacity);
SetPixelPacket(sparse_image,&pixel,q,indexes);
q++;
indexes++;
}
sync=SyncCacheViewAuthenticPixels(sparse_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,SparseColorTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
sparse_view=DestroyCacheView(sparse_view);
if (status == MagickFalse)
sparse_image=DestroyImage(sparse_image);
}
coeff = (double *) RelinquishMagickMemory(coeff);
return(sparse_image);
}
|
ref4.c | size_t i, j;
/* alias input parameters */
const double (*restrict tinit)[p->N][p->M] = (const double (*)[p->N][p->M])p->tinit;
const double (*restrict cinit)[p->N][p->M] = (const double (*)[p->N][p->M])p->conductivity;
omp_set_num_threads(4);
/* allocate grid data */
const size_t h = p->N + 2;
const size_t w = p->M + 2;
double (*restrict g1)[h][w] = malloc(h * w * sizeof(double));
double (*restrict g2)[h][w] = malloc(h * w * sizeof(double));
/* allocate halo for conductivities */
double (*restrict c)[h][w] = malloc(h * w * sizeof(double));
/* Alternative kept for reference: declare the grids and allocate them
   concurrently with parallel sections:

   double (*restrict g1)[h][w];
   double (*restrict g2)[h][w];
   double (*restrict c)[h][w];    -- halo for conductivities --
   #pragma omp parallel sections
   {
     #pragma omp section
     { g1 = malloc(h * w * sizeof(double)); }
     #pragma omp section
     { g2 = malloc(h * w * sizeof(double)); }
     #pragma omp section
     { c = malloc(h * w * sizeof(double)); }
   }
*/
struct timeval before;
static const double c_cdir = 0.25 * M_SQRT2 / (M_SQRT2 + 1.0);
static const double c_cdiag = 0.25 / (M_SQRT2 + 1.0);
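/* Sanity check on the stencil weights: 4*c_cdir + 4*c_cdiag
   = (sqrt(2) + 1)/(sqrt(2) + 1) = 1, so each update below is a convex
   combination of a cell and its 8 neighbours -- a uniform-temperature
   grid is a fixed point of the iteration. */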
#pragma omp parallel private(i,j)
{
printf("GHJG%d\n",omp_get_num_threads());
/* set initial temperatures and conductivities */
#pragma omp for schedule(static) collapse(2) nowait
for (i = 1; i < h - 1; ++i)
for (j = 1; j < w - 1; ++j)
{
(*g1)[i][j] = (*tinit)[i-1][j-1];
(*c)[i][j] = (*cinit)[i-1][j-1];
}
#pragma omp for nowait /* schedule(static, 1) */
/* smear outermost row to border */
for (j = 1; j < w-1; ++j) {
(*g1)[0][j] = (*g2)[0][j] = (*g1)[1][j];
(*g1)[h-1][j] = (*g2)[h-1][j] = (*g1)[h-2][j];
}
}
/* compute */
size_t iter;
double (*restrict src)[h][w] = g2;
double (*restrict dst)[h][w] = g1;
/*
* If initialization should be included in the timings
* could be a point of discussion.
*/
gettimeofday(&before, NULL);
for (iter = 1; iter <= p->maxiter; ++iter)
{
#ifdef GEN_PICTURES
do_draw(p, iter, h, w, src);
#endif
/* swap source and destination */
{ void *tmp = src; src = dst; dst = tmp; }
/* initialize halo on source */
do_copy(h, w, src);
#pragma omp parallel private(i,j)
{
/* compute */
#pragma omp for schedule(static) collapse(2)
for (i = 1; i < h - 1; ++i)
for (j = 1; j < w - 1; ++j)
{
/* local conductivity; renamed from 'w' to avoid shadowing the grid width */
double cond = (*c)[i][j];
double restc = 1.0 - cond;
(*dst)[i][j] = cond * (*src)[i][j] +
((*src)[i+1][j ] + (*src)[i-1][j ] +
(*src)[i ][j+1] + (*src)[i ][j-1]) * (restc * c_cdir) +
((*src)[i-1][j-1] + (*src)[i-1][j+1] +
(*src)[i+1][j-1] + (*src)[i+1][j+1]) * (restc * c_cdiag);
}
}
/* conditional reporting */
if (iter % p->period == 0) {
if(fill_report(p, r, h, w, dst, src, iter, &before)) {iter++; continue;}
if(p->printreports) report_results(p, r);
}
}
/* report at end in all cases */
iter--;
fill_report(p, r, h, w, dst, src, iter, &before);
free(c);
free(g2);
free(g1); |
DRB063-outeronly1-orig-no.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
Only the outermost loop can be parallelized.
*/
#include <omp.h>
#include <stdio.h>
int n = 100;
int m = 100;
double b[100][100];
int init()
{
int i;
int j;
int k;
#pragma omp parallel for private (i,j) firstprivate (n,m)
for (i = 0; i <= n - 1; i += 1) {
#pragma omp parallel for private (j)
for (j = 0; j <= m - 1; j += 1) {
b[i][j] = (i * j);
}
}
return 0;
}
void foo()
{
int i;
int j;
#pragma omp parallel for private (i,j) firstprivate (n,m)
for (i = 0; i <= n - 1; i += 1) {
// Be careful about bounds of j
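// The inner loop carries an anti-dependence: iteration j reads
// b[i][j+1], which iteration j+1 overwrites.  Run sequentially this
// shifts row i left by one element; parallelized over j it would
// race, which is why only the outer (i) loop gets the omp pragma.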
for (j = 0; j <= m - 1 - 1; j += 1) {
b[i][j] = b[i][j + 1];
}
}
}
int print()
{
int i;
int j;
int k;
for (i = 0; i <= n - 1; i += 1) {
for (j = 0; j <= m - 1; j += 1) {
printf("%lf\n",b[i][j]);
}
}
return 0;
}
int main()
{
init();
foo();
print();
return 0;
}
|
s_aatritemp.h | /*
* Mesa 3-D graphics library
*
* Copyright (C) 1999-2007 Brian Paul All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
/*
* Antialiased Triangle Rasterizer Template
*
* This file is #include'd to generate custom AA triangle rasterizers.
* NOTE: this code hasn't been optimized yet. That'll come after it
* works correctly.
*
* The following macros may be defined to indicate what auxiliary information
* must be computed across the triangle:
* DO_Z - if defined, compute Z values
* DO_ATTRIBS - if defined, compute texcoords, varying, etc.
*/
/*void triangle( struct gl_context *ctx, GLuint v0, GLuint v1, GLuint v2, GLuint pv )*/
{
const SWcontext *swrast = SWRAST_CONTEXT(ctx);
const GLfloat *p0 = v0->attrib[VARYING_SLOT_POS];
const GLfloat *p1 = v1->attrib[VARYING_SLOT_POS];
const GLfloat *p2 = v2->attrib[VARYING_SLOT_POS];
const SWvertex *vMin, *vMid, *vMax;
GLint iyMin, iyMax;
GLfloat yMin, yMax;
GLboolean ltor;
GLfloat majDx, majDy; /* major (i.e. long) edge dx and dy */
SWspan span;
#ifdef DO_Z
GLfloat zPlane[4];
#endif
GLfloat rPlane[4], gPlane[4], bPlane[4], aPlane[4];
#if defined(DO_ATTRIBS)
GLfloat attrPlane[VARYING_SLOT_MAX][4][4];
GLfloat wPlane[4]; /* win[3] */
#endif
GLfloat bf = SWRAST_CONTEXT(ctx)->_BackfaceCullSign;
(void) swrast;
INIT_SPAN(span, GL_POLYGON);
span.arrayMask = SPAN_COVERAGE;
/* determine bottom to top order of vertices */
{
GLfloat y0 = v0->attrib[VARYING_SLOT_POS][1];
GLfloat y1 = v1->attrib[VARYING_SLOT_POS][1];
GLfloat y2 = v2->attrib[VARYING_SLOT_POS][1];
if (y0 <= y1) {
if (y1 <= y2) {
vMin = v0; vMid = v1; vMax = v2; /* y0<=y1<=y2 */
}
else if (y2 <= y0) {
vMin = v2; vMid = v0; vMax = v1; /* y2<=y0<=y1 */
}
else {
vMin = v0; vMid = v2; vMax = v1; bf = -bf; /* y0<=y2<=y1 */
}
}
else {
if (y0 <= y2) {
vMin = v1; vMid = v0; vMax = v2; bf = -bf; /* y1<=y0<=y2 */
}
else if (y2 <= y1) {
vMin = v2; vMid = v1; vMax = v0; bf = -bf; /* y2<=y1<=y0 */
}
else {
vMin = v1; vMid = v2; vMax = v0; /* y1<=y2<=y0 */
}
}
}
majDx = vMax->attrib[VARYING_SLOT_POS][0] - vMin->attrib[VARYING_SLOT_POS][0];
majDy = vMax->attrib[VARYING_SLOT_POS][1] - vMin->attrib[VARYING_SLOT_POS][1];
/* front/back-face determination and culling */
{
const GLfloat botDx = vMid->attrib[VARYING_SLOT_POS][0] - vMin->attrib[VARYING_SLOT_POS][0];
const GLfloat botDy = vMid->attrib[VARYING_SLOT_POS][1] - vMin->attrib[VARYING_SLOT_POS][1];
const GLfloat area = majDx * botDy - botDx * majDy;
/* Do backface culling */
if (area * bf < 0 || area == 0 || IS_INF_OR_NAN(area))
return;
ltor = (GLboolean) (area < 0.0F);
span.facing = area * swrast->_BackfaceSign > 0.0F;
}
/* Plane equation setup:
* We evaluate plane equations at window (x,y) coordinates in order
* to compute color, Z, fog, texcoords, etc. This isn't terribly
* efficient but it's easy and reliable.
*/
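/* Illustrative sketch (the exact storage layout is defined elsewhere
 * in swrast): compute_plane(p0,p1,p2, v0,v1,v2, plane) fits the plane
 * through (p0.x,p0.y,v0), (p1.x,p1.y,v1), (p2.x,p2.y,v2), so that
 * solve_plane(x, y, plane) later returns the interpolated value of
 * that attribute at window position (x,y). */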
#ifdef DO_Z
compute_plane(p0, p1, p2, p0[2], p1[2], p2[2], zPlane);
span.arrayMask |= SPAN_Z;
#endif
if (ctx->Light.ShadeModel == GL_SMOOTH) {
compute_plane(p0, p1, p2, v0->color[RCOMP], v1->color[RCOMP], v2->color[RCOMP], rPlane);
compute_plane(p0, p1, p2, v0->color[GCOMP], v1->color[GCOMP], v2->color[GCOMP], gPlane);
compute_plane(p0, p1, p2, v0->color[BCOMP], v1->color[BCOMP], v2->color[BCOMP], bPlane);
compute_plane(p0, p1, p2, v0->color[ACOMP], v1->color[ACOMP], v2->color[ACOMP], aPlane);
}
else {
constant_plane(v2->color[RCOMP], rPlane);
constant_plane(v2->color[GCOMP], gPlane);
constant_plane(v2->color[BCOMP], bPlane);
constant_plane(v2->color[ACOMP], aPlane);
}
span.arrayMask |= SPAN_RGBA;
#if defined(DO_ATTRIBS)
{
const GLfloat invW0 = v0->attrib[VARYING_SLOT_POS][3];
const GLfloat invW1 = v1->attrib[VARYING_SLOT_POS][3];
const GLfloat invW2 = v2->attrib[VARYING_SLOT_POS][3];
compute_plane(p0, p1, p2, invW0, invW1, invW2, wPlane);
span.attrStepX[VARYING_SLOT_POS][3] = plane_dx(wPlane);
span.attrStepY[VARYING_SLOT_POS][3] = plane_dy(wPlane);
ATTRIB_LOOP_BEGIN
GLuint c;
if (swrast->_InterpMode[attr] == GL_FLAT) {
for (c = 0; c < 4; c++) {
constant_plane(v2->attrib[attr][c] * invW2, attrPlane[attr][c]);
}
}
else {
for (c = 0; c < 4; c++) {
const GLfloat a0 = v0->attrib[attr][c] * invW0;
const GLfloat a1 = v1->attrib[attr][c] * invW1;
const GLfloat a2 = v2->attrib[attr][c] * invW2;
compute_plane(p0, p1, p2, a0, a1, a2, attrPlane[attr][c]);
}
}
for (c = 0; c < 4; c++) {
span.attrStepX[attr][c] = plane_dx(attrPlane[attr][c]);
span.attrStepY[attr][c] = plane_dy(attrPlane[attr][c]);
}
ATTRIB_LOOP_END
}
#endif
/* Begin bottom-to-top scan over the triangle.
* The long edge will either be on the left or right side of the
* triangle. We always scan from the long edge toward the shorter
* edges, stopping when we find that coverage = 0. If the long edge
* is on the left we scan left-to-right. Else, we scan right-to-left.
*/
yMin = vMin->attrib[VARYING_SLOT_POS][1];
yMax = vMax->attrib[VARYING_SLOT_POS][1];
iyMin = (GLint) yMin;
iyMax = (GLint) yMax + 1;
if (ltor) {
/* scan left to right */
const GLfloat *pMin = vMin->attrib[VARYING_SLOT_POS];
const GLfloat *pMid = vMid->attrib[VARYING_SLOT_POS];
const GLfloat *pMax = vMax->attrib[VARYING_SLOT_POS];
const GLfloat dxdy = majDx / majDy;
const GLfloat xAdj = dxdy < 0.0F ? -dxdy : 0.0F;
GLint iy;
#ifdef _OPENMP
#pragma omp parallel for schedule(dynamic) private(iy) firstprivate(span)
#endif
for (iy = iyMin; iy < iyMax; iy++) {
GLfloat x = pMin[0] - (yMin - iy) * dxdy;
GLint ix, startX = (GLint) (x - xAdj);
GLuint count;
GLfloat coverage = 0.0F;
#ifdef _OPENMP
/* each thread needs to use a different (global) SpanArrays variable */
span.array = SWRAST_CONTEXT(ctx)->SpanArrays + omp_get_thread_num();
#endif
/* skip over fragments with zero coverage */
while (startX < SWRAST_MAX_WIDTH) {
coverage = compute_coveragef(pMin, pMid, pMax, startX, iy);
if (coverage > 0.0F)
break;
startX++;
}
/* enter interior of triangle */
ix = startX;
#if defined(DO_ATTRIBS)
/* compute attributes at left-most fragment */
span.attrStart[VARYING_SLOT_POS][3] = solve_plane(ix + 0.5F, iy + 0.5F, wPlane);
ATTRIB_LOOP_BEGIN
GLuint c;
for (c = 0; c < 4; c++) {
span.attrStart[attr][c] = solve_plane(ix + 0.5F, iy + 0.5F, attrPlane[attr][c]);
}
ATTRIB_LOOP_END
#endif
count = 0;
while (coverage > 0.0F) {
/* (cx,cy) = center of fragment */
const GLfloat cx = ix + 0.5F, cy = iy + 0.5F;
SWspanarrays *array = span.array;
array->coverage[count] = coverage;
#ifdef DO_Z
array->z[count] = (GLuint) solve_plane(cx, cy, zPlane);
#endif
array->rgba[count][RCOMP] = solve_plane_chan(cx, cy, rPlane);
array->rgba[count][GCOMP] = solve_plane_chan(cx, cy, gPlane);
array->rgba[count][BCOMP] = solve_plane_chan(cx, cy, bPlane);
array->rgba[count][ACOMP] = solve_plane_chan(cx, cy, aPlane);
ix++;
count++;
coverage = compute_coveragef(pMin, pMid, pMax, ix, iy);
}
if (ix > startX) {
span.x = startX;
span.y = iy;
span.end = (GLuint) ix - (GLuint) startX;
_swrast_write_rgba_span(ctx, &span);
}
}
}
else {
/* scan right to left */
const GLfloat *pMin = vMin->attrib[VARYING_SLOT_POS];
const GLfloat *pMid = vMid->attrib[VARYING_SLOT_POS];
const GLfloat *pMax = vMax->attrib[VARYING_SLOT_POS];
const GLfloat dxdy = majDx / majDy;
const GLfloat xAdj = dxdy > 0 ? dxdy : 0.0F;
GLint iy;
#ifdef _OPENMP
#pragma omp parallel for schedule(dynamic) private(iy) firstprivate(span)
#endif
for (iy = iyMin; iy < iyMax; iy++) {
GLfloat x = pMin[0] - (yMin - iy) * dxdy;
GLint ix, left, startX = (GLint) (x + xAdj);
GLuint count, n;
GLfloat coverage = 0.0F;
#ifdef _OPENMP
/* each thread needs to use a different (global) SpanArrays variable */
span.array = SWRAST_CONTEXT(ctx)->SpanArrays + omp_get_thread_num();
#endif
/* make sure we're not past the window edge */
if (startX >= ctx->DrawBuffer->_Xmax) {
startX = ctx->DrawBuffer->_Xmax - 1;
}
/* skip fragments with zero coverage */
while (startX > 0) {
coverage = compute_coveragef(pMin, pMax, pMid, startX, iy);
if (coverage > 0.0F)
break;
startX--;
}
/* enter interior of triangle */
ix = startX;
count = 0;
while (coverage > 0.0F) {
/* (cx,cy) = center of fragment */
const GLfloat cx = ix + 0.5F, cy = iy + 0.5F;
SWspanarrays *array = span.array;
assert(ix >= 0);
array->coverage[ix] = coverage;
#ifdef DO_Z
array->z[ix] = (GLuint) solve_plane(cx, cy, zPlane);
#endif
array->rgba[ix][RCOMP] = solve_plane_chan(cx, cy, rPlane);
array->rgba[ix][GCOMP] = solve_plane_chan(cx, cy, gPlane);
array->rgba[ix][BCOMP] = solve_plane_chan(cx, cy, bPlane);
array->rgba[ix][ACOMP] = solve_plane_chan(cx, cy, aPlane);
ix--;
count++;
coverage = compute_coveragef(pMin, pMax, pMid, ix, iy);
}
#if defined(DO_ATTRIBS)
/* compute attributes at left-most fragment */
span.attrStart[VARYING_SLOT_POS][3] = solve_plane(ix + 1.5F, iy + 0.5F, wPlane);
ATTRIB_LOOP_BEGIN
GLuint c;
for (c = 0; c < 4; c++) {
span.attrStart[attr][c] = solve_plane(ix + 1.5F, iy + 0.5F, attrPlane[attr][c]);
}
ATTRIB_LOOP_END
#endif
if (startX > ix) {
n = (GLuint) startX - (GLuint) ix;
left = ix + 1;
/* shift all values to the left */
/* XXX this is temporary */
{
SWspanarrays *array = span.array;
GLint j;
for (j = 0; j < (GLint) n; j++) {
array->coverage[j] = array->coverage[j + left];
COPY_CHAN4(array->rgba[j], array->rgba[j + left]);
#ifdef DO_Z
array->z[j] = array->z[j + left];
#endif
}
}
span.x = left;
span.y = iy;
span.end = n;
_swrast_write_rgba_span(ctx, &span);
}
}
}
}
#undef DO_Z
#undef DO_ATTRIBS
#undef DO_OCCLUSION_TEST
|